This commit enables subregister liveness by default for RISC-V.
It was previously disabled in https://reviews.llvm.org/D129646 after an earlier attempt to enable it in https://reviews.llvm.org/D128016.
We believe that https://reviews.llvm.org/D129735 fixes the issue that caused it to be disabled.
Reviewed By: craig.topper, kito-cheng
Differential Revision: https://reviews.llvm.org/D145546
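With subregister liveness enabled, the register allocator tracks the registers of a vector register group individually, which is what removes the redundant whole-group copies, spills, and kill comments in the test diffs below. Should a regression need to be bisected, the new default can still be overridden from the command line via the option changed below, e.g. (a hypothetical invocation; adjust the triple and attributes to your configuration):

  llc -mtriple=riscv64 -mattr=+v -riscv-enable-subreg-liveness=false input.ll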
#include "RISCVGenSubtargetInfo.inc"
static cl::opt<bool> EnableSubRegLiveness("riscv-enable-subreg-liveness",
- cl::init(false), cl::Hidden);
+ cl::init(true), cl::Hidden);
static cl::opt<unsigned> RVVVectorLMULMax(
"riscv-v-fixed-length-vector-lmul-max",
; CHECK-V-NEXT: sd a0, 8(sp)
; CHECK-V-NEXT: addi a0, sp, 24
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT: vle64.v v8, (a0)
-; CHECK-V-NEXT: mv a0, sp
; CHECK-V-NEXT: vle64.v v10, (a0)
+; CHECK-V-NEXT: mv a0, sp
+; CHECK-V-NEXT: vle64.v v8, (a0)
; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT: vslideup.vi v10, v8, 1
+; CHECK-V-NEXT: vslideup.vi v8, v10, 1
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT: vle64.v v8, (a0)
+; CHECK-V-NEXT: vle64.v v10, (a0)
; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT: vslideup.vi v10, v8, 2
+; CHECK-V-NEXT: vslideup.vi v8, v10, 2
; CHECK-V-NEXT: addi a0, sp, 8
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT: vle64.v v8, (a0)
+; CHECK-V-NEXT: vle64.v v10, (a0)
; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-V-NEXT: vslideup.vi v10, v8, 3
+; CHECK-V-NEXT: vslideup.vi v8, v10, 3
; CHECK-V-NEXT: lui a0, 524288
; CHECK-V-NEXT: addiw a1, a0, -1
-; CHECK-V-NEXT: vmin.vx v8, v10, a1
+; CHECK-V-NEXT: vmin.vx v8, v8, a1
; CHECK-V-NEXT: vmax.vx v10, v8, a0
; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-V-NEXT: vnsrl.wi v8, v10, 0
; CHECK-V-NEXT: sd a0, 8(sp)
; CHECK-V-NEXT: addi a0, sp, 24
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT: vle64.v v8, (a0)
-; CHECK-V-NEXT: mv a0, sp
; CHECK-V-NEXT: vle64.v v10, (a0)
+; CHECK-V-NEXT: mv a0, sp
+; CHECK-V-NEXT: vle64.v v8, (a0)
; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT: vslideup.vi v10, v8, 1
+; CHECK-V-NEXT: vslideup.vi v8, v10, 1
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT: vle64.v v8, (a0)
+; CHECK-V-NEXT: vle64.v v10, (a0)
; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT: vslideup.vi v10, v8, 2
+; CHECK-V-NEXT: vslideup.vi v8, v10, 2
; CHECK-V-NEXT: addi a0, sp, 8
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT: vle64.v v8, (a0)
+; CHECK-V-NEXT: vle64.v v10, (a0)
; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-V-NEXT: vslideup.vi v10, v8, 3
+; CHECK-V-NEXT: vslideup.vi v8, v10, 3
; CHECK-V-NEXT: li a0, -1
; CHECK-V-NEXT: srli a0, a0, 32
-; CHECK-V-NEXT: vminu.vx v10, v10, a0
+; CHECK-V-NEXT: vminu.vx v10, v8, a0
; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-V-NEXT: vnsrl.wi v8, v10, 0
; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: sd a0, 8(sp)
; CHECK-V-NEXT: addi a0, sp, 24
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT: vle64.v v8, (a0)
-; CHECK-V-NEXT: mv a0, sp
; CHECK-V-NEXT: vle64.v v10, (a0)
+; CHECK-V-NEXT: mv a0, sp
+; CHECK-V-NEXT: vle64.v v8, (a0)
; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT: vslideup.vi v10, v8, 1
+; CHECK-V-NEXT: vslideup.vi v8, v10, 1
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT: vle64.v v8, (a0)
+; CHECK-V-NEXT: vle64.v v10, (a0)
; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT: vslideup.vi v10, v8, 2
+; CHECK-V-NEXT: vslideup.vi v8, v10, 2
; CHECK-V-NEXT: addi a0, sp, 8
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT: vle64.v v8, (a0)
+; CHECK-V-NEXT: vle64.v v10, (a0)
; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-V-NEXT: vslideup.vi v10, v8, 3
+; CHECK-V-NEXT: vslideup.vi v8, v10, 3
; CHECK-V-NEXT: li a0, -1
; CHECK-V-NEXT: srli a0, a0, 32
-; CHECK-V-NEXT: vmin.vx v8, v10, a0
+; CHECK-V-NEXT: vmin.vx v8, v8, a0
; CHECK-V-NEXT: vmax.vx v10, v8, zero
; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-V-NEXT: vnsrl.wi v8, v10, 0
; CHECK-V-NEXT: sd a0, 8(sp)
; CHECK-V-NEXT: addi a0, sp, 24
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT: vle64.v v8, (a0)
-; CHECK-V-NEXT: mv a0, sp
; CHECK-V-NEXT: vle64.v v10, (a0)
+; CHECK-V-NEXT: mv a0, sp
+; CHECK-V-NEXT: vle64.v v8, (a0)
; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT: vslideup.vi v10, v8, 1
+; CHECK-V-NEXT: vslideup.vi v8, v10, 1
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT: vle64.v v8, (a0)
+; CHECK-V-NEXT: vle64.v v10, (a0)
; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT: vslideup.vi v10, v8, 2
+; CHECK-V-NEXT: vslideup.vi v8, v10, 2
; CHECK-V-NEXT: addi a0, sp, 8
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT: vle64.v v8, (a0)
+; CHECK-V-NEXT: vle64.v v10, (a0)
; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-V-NEXT: vslideup.vi v10, v8, 3
+; CHECK-V-NEXT: vslideup.vi v8, v10, 3
; CHECK-V-NEXT: lui a0, 524288
; CHECK-V-NEXT: addiw a1, a0, -1
-; CHECK-V-NEXT: vmin.vx v8, v10, a1
+; CHECK-V-NEXT: vmin.vx v8, v8, a1
; CHECK-V-NEXT: vmax.vx v10, v8, a0
; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-V-NEXT: vnsrl.wi v8, v10, 0
; CHECK-V-NEXT: sd a0, 8(sp)
; CHECK-V-NEXT: addi a0, sp, 24
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT: vle64.v v8, (a0)
-; CHECK-V-NEXT: mv a0, sp
; CHECK-V-NEXT: vle64.v v10, (a0)
+; CHECK-V-NEXT: mv a0, sp
+; CHECK-V-NEXT: vle64.v v8, (a0)
; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT: vslideup.vi v10, v8, 1
+; CHECK-V-NEXT: vslideup.vi v8, v10, 1
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT: vle64.v v8, (a0)
+; CHECK-V-NEXT: vle64.v v10, (a0)
; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT: vslideup.vi v10, v8, 2
+; CHECK-V-NEXT: vslideup.vi v8, v10, 2
; CHECK-V-NEXT: addi a0, sp, 8
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT: vle64.v v8, (a0)
+; CHECK-V-NEXT: vle64.v v10, (a0)
; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-V-NEXT: vslideup.vi v10, v8, 3
+; CHECK-V-NEXT: vslideup.vi v8, v10, 3
; CHECK-V-NEXT: li a0, -1
; CHECK-V-NEXT: srli a0, a0, 32
-; CHECK-V-NEXT: vminu.vx v10, v10, a0
+; CHECK-V-NEXT: vminu.vx v10, v8, a0
; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-V-NEXT: vnsrl.wi v8, v10, 0
; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: sd a0, 8(sp)
; CHECK-V-NEXT: addi a0, sp, 24
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT: vle64.v v8, (a0)
-; CHECK-V-NEXT: mv a0, sp
; CHECK-V-NEXT: vle64.v v10, (a0)
+; CHECK-V-NEXT: mv a0, sp
+; CHECK-V-NEXT: vle64.v v8, (a0)
; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT: vslideup.vi v10, v8, 1
+; CHECK-V-NEXT: vslideup.vi v8, v10, 1
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT: vle64.v v8, (a0)
+; CHECK-V-NEXT: vle64.v v10, (a0)
; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT: vslideup.vi v10, v8, 2
+; CHECK-V-NEXT: vslideup.vi v8, v10, 2
; CHECK-V-NEXT: addi a0, sp, 8
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT: vle64.v v8, (a0)
+; CHECK-V-NEXT: vle64.v v10, (a0)
; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-V-NEXT: vslideup.vi v10, v8, 3
+; CHECK-V-NEXT: vslideup.vi v8, v10, 3
; CHECK-V-NEXT: li a0, -1
; CHECK-V-NEXT: srli a0, a0, 32
-; CHECK-V-NEXT: vmin.vx v8, v10, a0
+; CHECK-V-NEXT: vmin.vx v8, v8, a0
; CHECK-V-NEXT: vmax.vx v10, v8, zero
; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-V-NEXT: vnsrl.wi v8, v10, 0
define <vscale x 4 x i32> @extract_nxv8i32_nxv4i32_0(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv4i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m4
; CHECK-NEXT: ret
%c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, i64 0)
ret <vscale x 4 x i32> %c
define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_0(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv2i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8m4
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 0)
ret <vscale x 2 x i32> %c
define <vscale x 8 x i32> @extract_nxv16i32_nxv8i32_0(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv8i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m8
; CHECK-NEXT: ret
%c = call <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
ret <vscale x 8 x i32> %c
define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_0(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv4i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m8
; CHECK-NEXT: ret
%c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
ret <vscale x 4 x i32> %c
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_0(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8m8
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
ret <vscale x 2 x i32> %c
define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_0(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv1i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8m8
; CHECK-NEXT: ret
%c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
ret <vscale x 1 x i32> %c
define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_0(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8m4
; CHECK-NEXT: ret
%c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 0)
ret <vscale x 2 x i8> %c
define <vscale x 2 x half> @extract_nxv2f16_nxv16f16_0(<vscale x 16 x half> %vec) {
; CHECK-LABEL: extract_nxv2f16_nxv16f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8m4
; CHECK-NEXT: ret
%c = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 0)
ret <vscale x 2 x half> %c
define <vscale x 6 x half> @extract_nxv6f16_nxv12f16_0(<vscale x 12 x half> %in) {
; CHECK-LABEL: extract_nxv6f16_nxv12f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m4
; CHECK-NEXT: ret
%res = call <vscale x 6 x half> @llvm.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half> %in, i64 0)
ret <vscale x 6 x half> %res
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vx v14, v10, a0
-; CHECK-NEXT: vslidedown.vx v12, v9, a0
+; CHECK-NEXT: vslidedown.vx v11, v10, a0
+; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
-; CHECK-NEXT: vslideup.vi v13, v14, 0
+; CHECK-NEXT: vslideup.vi v9, v11, 0
; CHECK-NEXT: add a1, a0, a0
; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
-; CHECK-NEXT: vslideup.vx v12, v10, a0
-; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%res = call <vscale x 6 x half> @llvm.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half> %in, i64 6)
ret <vscale x 6 x half> %res
define <32 x float> @vfptrunc_v32f32_v32f64(<32 x double> %a, <32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vfptrunc_v32f32_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vmv1r.v v28, v0
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: and a1, a2, a1
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; CHECK-NEXT: vfncvt.f.f.w v24, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB7_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB7_2:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfncvt.f.f.w v16, v24, v0.t
+; CHECK-NEXT: vmv1r.v v0, v28
+; CHECK-NEXT: vfncvt.f.f.w v16, v8, v0.t
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
-; CHECK-NEXT: vslideup.vi v16, v8, 16
+; CHECK-NEXT: vslideup.vi v16, v24, 16
; CHECK-NEXT: vmv8r.v v8, v16
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x float> @llvm.vp.fptrunc.v32f64.v32f32(<32 x double> %a, <32 x i1> %m, i32 %vl)
ret <32 x float> %v
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg2e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
%1 = call { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.v8i8.p0.i64(ptr %ptr, i64 8)
%2 = extractvalue { <8 x i8>, <8 x i8> } %1, 0
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg3e8.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v6_v7_v8
; CHECK-NEXT: ret
%1 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg3.load.v8i8.p0.i64(ptr %ptr, i64 8)
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg4e8.v v5, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v5_v6_v7_v8
; CHECK-NEXT: ret
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg4.load.v8i8.p0.i64(ptr %ptr, i64 8)
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg5e8.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v4_v5_v6_v7_v8
; CHECK-NEXT: ret
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg5.load.v8i8.p0.i64(ptr %ptr, i64 8)
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg6e8.v v3, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v3_v4_v5_v6_v7_v8
; CHECK-NEXT: ret
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg6.load.v8i8.p0.i64(ptr %ptr, i64 8)
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg7e8.v v2, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v2_v3_v4_v5_v6_v7_v8
; CHECK-NEXT: ret
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg7.load.v8i8.p0.i64(ptr %ptr, i64 8)
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg8e8.v v1, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v1_v2_v3_v4_v5_v6_v7_v8
; CHECK-NEXT: ret
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg8.load.v8i8.p0.i64(ptr %ptr, i64 8)
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
define void @store_factor2(<8 x i8> %v0, <8 x i8> %v1, ptr %ptr) {
; CHECK-LABEL: store_factor2:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v9 killed $v9 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vsseg2e8.v v8, (a0)
; CHECK-NEXT: ret
call void @llvm.riscv.seg2.store.v8i8.p0.iXLen(<8 x i8> %v0, <8 x i8> %v1, ptr %ptr, iXLen 8)
define void @store_factor3(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, ptr %ptr) {
; CHECK-LABEL: store_factor3:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v10 killed $v10 killed $v8_v9_v10 def $v8_v9_v10
-; CHECK-NEXT: # kill: def $v9 killed $v9 killed $v8_v9_v10 def $v8_v9_v10
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9_v10 def $v8_v9_v10
; CHECK-NEXT: vsseg3e8.v v8, (a0)
; CHECK-NEXT: ret
call void @llvm.riscv.seg3.store.v8i8.p0.iXLen(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, ptr %ptr, iXLen 8)
define void @store_factor4(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, ptr %ptr) {
; CHECK-LABEL: store_factor4:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v11 killed $v11 killed $v8_v9_v10_v11 def $v8_v9_v10_v11
-; CHECK-NEXT: # kill: def $v10 killed $v10 killed $v8_v9_v10_v11 def $v8_v9_v10_v11
-; CHECK-NEXT: # kill: def $v9 killed $v9 killed $v8_v9_v10_v11 def $v8_v9_v10_v11
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9_v10_v11 def $v8_v9_v10_v11
; CHECK-NEXT: vsseg4e8.v v8, (a0)
; CHECK-NEXT: ret
call void @llvm.riscv.seg4.store.v8i8.p0.iXLen(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, ptr %ptr, iXLen 8)
define void @store_factor5(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, ptr %ptr) {
; CHECK-LABEL: store_factor5:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v12 killed $v12 killed $v8_v9_v10_v11_v12 def $v8_v9_v10_v11_v12
-; CHECK-NEXT: # kill: def $v11 killed $v11 killed $v8_v9_v10_v11_v12 def $v8_v9_v10_v11_v12
-; CHECK-NEXT: # kill: def $v10 killed $v10 killed $v8_v9_v10_v11_v12 def $v8_v9_v10_v11_v12
-; CHECK-NEXT: # kill: def $v9 killed $v9 killed $v8_v9_v10_v11_v12 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9_v10_v11_v12 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vsseg5e8.v v8, (a0)
; CHECK-NEXT: ret
call void @llvm.riscv.seg5.store.v8i8.p0.iXLen(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, ptr %ptr, iXLen 8)
define void @store_factor6(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, ptr %ptr) {
; CHECK-LABEL: store_factor6:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v13 killed $v13 killed $v8_v9_v10_v11_v12_v13 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT: # kill: def $v12 killed $v12 killed $v8_v9_v10_v11_v12_v13 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT: # kill: def $v11 killed $v11 killed $v8_v9_v10_v11_v12_v13 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT: # kill: def $v10 killed $v10 killed $v8_v9_v10_v11_v12_v13 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT: # kill: def $v9 killed $v9 killed $v8_v9_v10_v11_v12_v13 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9_v10_v11_v12_v13 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vsseg6e8.v v8, (a0)
; CHECK-NEXT: ret
call void @llvm.riscv.seg6.store.v8i8.p0.iXLen(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, ptr %ptr, iXLen 8)
define void @store_factor7(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, ptr %ptr) {
; CHECK-LABEL: store_factor7:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v14 killed $v14 killed $v8_v9_v10_v11_v12_v13_v14 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT: # kill: def $v13 killed $v13 killed $v8_v9_v10_v11_v12_v13_v14 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT: # kill: def $v12 killed $v12 killed $v8_v9_v10_v11_v12_v13_v14 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT: # kill: def $v11 killed $v11 killed $v8_v9_v10_v11_v12_v13_v14 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT: # kill: def $v10 killed $v10 killed $v8_v9_v10_v11_v12_v13_v14 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT: # kill: def $v9 killed $v9 killed $v8_v9_v10_v11_v12_v13_v14 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9_v10_v11_v12_v13_v14 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vsseg7e8.v v8, (a0)
; CHECK-NEXT: ret
call void @llvm.riscv.seg7.store.v8i8.p0.iXLen(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, ptr %ptr, iXLen 8)
define void @store_factor8(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, <8 x i8> %v7, ptr %ptr) {
; CHECK-LABEL: store_factor8:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v15 killed $v15 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT: # kill: def $v14 killed $v14 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT: # kill: def $v13 killed $v13 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT: # kill: def $v12 killed $v12 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT: # kill: def $v11 killed $v11 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT: # kill: def $v10 killed $v10 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT: # kill: def $v9 killed $v9 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vsseg8e8.v v8, (a0)
; CHECK-NEXT: ret
call void @llvm.riscv.seg8.store.v8i8.p0.iXLen(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, <8 x i8> %v7, ptr %ptr, iXLen 8)
define <128 x i7> @vtrunc_v128i7_v128i16(<128 x i16> %a, <128 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v128i7_v128i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vmv1r.v v28, v0
; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 8
; CHECK-NEXT: addi a1, a0, -64
; CHECK-NEXT: and a1, a2, a1
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: li a1, 64
-; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t
+; CHECK-NEXT: vnsrl.wi v24, v16, 0, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB4_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 64
; CHECK-NEXT: .LBB4_2:
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v28
+; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t
; CHECK-NEXT: li a0, 128
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
-; CHECK-NEXT: vslideup.vx v16, v8, a1
+; CHECK-NEXT: vslideup.vx v16, v24, a1
; CHECK-NEXT: vmv8r.v v8, v16
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <128 x i7> @llvm.vp.trunc.v128i7.v128i16(<128 x i16> %a, <128 x i1> %m, i32 %vl)
ret <128 x i7> %v
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 56 * vlenb
; CHECK-NEXT: vmv1r.v v1, v0
; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: li a3, 24
-; CHECK-NEXT: mul a2, a2, a3
+; CHECK-NEXT: slli a2, a2, 4
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 5
+; CHECK-NEXT: li a3, 24
+; CHECK-NEXT: mul a2, a2, a3
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t
; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 4
+; CHECK-NEXT: slli a2, a2, 3
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: .LBB16_4:
; CHECK-NEXT: li a3, 32
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v16, (a1)
+; CHECK-NEXT: vle64.v v24, (a1)
; CHECK-NEXT: addi a5, a7, -32
; CHECK-NEXT: sltu a6, a7, a5
; CHECK-NEXT: addi a6, a6, -1
; CHECK-NEXT: and a6, t0, a6
; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v4
-; CHECK-NEXT: vnsrl.wi v24, v8, 0, v0.t
-; CHECK-NEXT: csrr a6, vlenb
-; CHECK-NEXT: slli a6, a6, 3
-; CHECK-NEXT: add a6, sp, a6
-; CHECK-NEXT: addi a6, a6, 16
-; CHECK-NEXT: vs8r.v v24, (a6) # Unknown-size Folded Spill
+; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t
+; CHECK-NEXT: addi a6, sp, 16
+; CHECK-NEXT: vs8r.v v16, (a6) # Unknown-size Folded Spill
; CHECK-NEXT: bltu a5, a2, .LBB16_6
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: li a5, 16
; CHECK-NEXT: addi a1, a1, 256
; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v2
-; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t
+; CHECK-NEXT: vnsrl.wi v8, v24, 0, v0.t
; CHECK-NEXT: csrr a5, vlenb
; CHECK-NEXT: li t0, 40
; CHECK-NEXT: mul a5, a5, t0
; CHECK-NEXT: li a4, 32
; CHECK-NEXT: .LBB16_8:
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v4, v3, 2
+; CHECK-NEXT: vslidedown.vi v20, v3, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v16, (a6)
-; CHECK-NEXT: vle64.v v24, (a1)
+; CHECK-NEXT: vle64.v v24, (a6)
+; CHECK-NEXT: vle64.v v8, (a1)
; CHECK-NEXT: mv a1, a4
; CHECK-NEXT: bltu a4, a2, .LBB16_10
; CHECK-NEXT: # %bb.9:
; CHECK-NEXT: vslidedown.vi v2, v1, 2
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v3
-; CHECK-NEXT: vnsrl.wi v8, v24, 0, v0.t
+; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 5
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: addi a1, a4, -16
; CHECK-NEXT: sltu a4, a4, a1
; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: and a1, a4, a1
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v4
-; CHECK-NEXT: vnsrl.wi v24, v16, 0, v0.t
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vnsrl.wi v8, v24, 0, v0.t
; CHECK-NEXT: bltu a7, a3, .LBB16_12
; CHECK-NEXT: # %bb.11:
; CHECK-NEXT: li a7, 32
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vslideup.vi v16, v24, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 5
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; CHECK-NEXT: vslideup.vi v8, v16, 16
+; CHECK-NEXT: vslideup.vi v16, v8, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 5
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: addi a1, a7, -16
; CHECK-NEXT: sltu a4, a7, a1
; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v2
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: li a4, 24
-; CHECK-NEXT: mul a1, a1, a4
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; CHECK-NEXT: vnsrl.wi v24, v16, 0, v0.t
-; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t
; CHECK-NEXT: bltu a7, a2, .LBB16_14
; CHECK-NEXT: # %bb.13:
; CHECK-NEXT: li a7, 16
; CHECK-NEXT: vsetvli zero, a7, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v1
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 5
+; CHECK-NEXT: li a2, 24
+; CHECK-NEXT: mul a1, a1, a2
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; CHECK-NEXT: vnsrl.wi v24, v16, 0, v0.t
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t
; CHECK-NEXT: vsetvli zero, a3, e32, m8, tu, ma
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; CHECK-NEXT: vslideup.vi v24, v16, 16
-; CHECK-NEXT: vse32.v v24, (a0)
+; CHECK-NEXT: vslideup.vi v16, v8, 16
+; CHECK-NEXT: vse32.v v16, (a0)
; CHECK-NEXT: addi a1, a0, 256
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 5
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; CHECK-NEXT: vse32.v v8, (a1)
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: csrr a2, vlenb
define <32 x i32> @vtrunc_v32i32_v32i64(<32 x i64> %a, <32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v32i32_v32i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vmv1r.v v28, v0
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: and a1, a2, a1
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t
+; CHECK-NEXT: vnsrl.wi v24, v16, 0, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB17_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB17_2:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v28
+; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
-; CHECK-NEXT: vslideup.vi v16, v8, 16
+; CHECK-NEXT: vslideup.vi v16, v24, 16
; CHECK-NEXT: vmv8r.v v8, v16
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x i32> @llvm.vp.trunc.v32i32.v32i64(<32 x i64> %a, <32 x i1> %m, i32 %vl)
ret <32 x i32> %v
; RV32-V128-LABEL: interleave_v2f64:
; RV32-V128: # %bb.0:
; RV32-V128-NEXT: vmv1r.v v12, v9
-; RV32-V128-NEXT: # kill: def $v8 killed $v8 def $v8m2
; RV32-V128-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-V128-NEXT: vid.v v10
-; RV32-V128-NEXT: vsrl.vi v14, v10, 1
+; RV32-V128-NEXT: vid.v v9
+; RV32-V128-NEXT: vsrl.vi v14, v9, 1
; RV32-V128-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32-V128-NEXT: vrgatherei16.vv v10, v8, v14
; RV32-V128-NEXT: li a0, 10
; RV32-V128-NEXT: vmv.s.x v0, a0
-; RV32-V128-NEXT: vrgatherei16.vv v10, v8, v14
; RV32-V128-NEXT: vrgatherei16.vv v10, v12, v14, v0.t
; RV32-V128-NEXT: vmv.v.v v8, v10
; RV32-V128-NEXT: ret
; RV64-V128-LABEL: interleave_v2f64:
; RV64-V128: # %bb.0:
; RV64-V128-NEXT: vmv1r.v v12, v9
-; RV64-V128-NEXT: # kill: def $v8 killed $v8 def $v8m2
; RV64-V128-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-V128-NEXT: vid.v v10
; RV64-V128-NEXT: vsrl.vi v14, v10, 1
+; RV64-V128-NEXT: vrgather.vv v10, v8, v14
; RV64-V128-NEXT: li a0, 10
; RV64-V128-NEXT: vmv.s.x v0, a0
-; RV64-V128-NEXT: vrgather.vv v10, v8, v14
; RV64-V128-NEXT: vrgather.vv v10, v12, v14, v0.t
; RV64-V128-NEXT: vmv.v.v v8, v10
; RV64-V128-NEXT: ret
; RV32-V128-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV32-V128-NEXT: vle32.v v0, (a0)
; RV32-V128-NEXT: vmv8r.v v24, v8
-; RV32-V128-NEXT: addi a0, sp, 16
-; RV32-V128-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-V128-NEXT: vrgather.vv v8, v24, v0
+; RV32-V128-NEXT: addi a0, sp, 16
+; RV32-V128-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; RV32-V128-NEXT: lui a0, %hi(.LCPI10_1)
; RV32-V128-NEXT: addi a0, a0, %lo(.LCPI10_1)
; RV32-V128-NEXT: vle32.v v24, (a0)
; RV64-V128-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV64-V128-NEXT: vle32.v v0, (a0)
; RV64-V128-NEXT: vmv8r.v v24, v8
-; RV64-V128-NEXT: addi a0, sp, 16
-; RV64-V128-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV64-V128-NEXT: vrgather.vv v8, v24, v0
+; RV64-V128-NEXT: addi a0, sp, 16
+; RV64-V128-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; RV64-V128-NEXT: lui a0, %hi(.LCPI10_1)
; RV64-V128-NEXT: addi a0, a0, %lo(.LCPI10_1)
; RV64-V128-NEXT: vle32.v v24, (a0)
;
; LMULMAX1-LABEL: insert_nxv8i32_v8i32_0:
; LMULMAX1: # %bb.0:
+; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v12, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
+; LMULMAX1-NEXT: vle32.v v12, (a1)
; LMULMAX1-NEXT: vle32.v v16, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m4, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v12, 0
+; LMULMAX1-NEXT: vslideup.vi v8, v16, 0
; LMULMAX1-NEXT: vsetivli zero, 8, e32, m4, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v16, 4
+; LMULMAX1-NEXT: vslideup.vi v8, v12, 4
; LMULMAX1-NEXT: ret
%sv = load <8 x i32>, ptr %svp
%v = call <vscale x 8 x i32> @llvm.vector.insert.v8i32.nxv8i32(<vscale x 8 x i32> %vec, <8 x i32> %sv, i64 0)
;
; LMULMAX1-LABEL: insert_nxv8i32_v8i32_8:
; LMULMAX1: # %bb.0:
+; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v12, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
+; LMULMAX1-NEXT: vle32.v v12, (a1)
; LMULMAX1-NEXT: vle32.v v16, (a0)
; LMULMAX1-NEXT: vsetivli zero, 12, e32, m4, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v12, 8
+; LMULMAX1-NEXT: vslideup.vi v8, v16, 8
; LMULMAX1-NEXT: vsetivli zero, 16, e32, m4, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v16, 12
+; LMULMAX1-NEXT: vslideup.vi v8, v12, 12
; LMULMAX1-NEXT: ret
%sv = load <8 x i32>, ptr %svp
%v = call <vscale x 8 x i32> @llvm.vector.insert.v8i32.nxv8i32(<vscale x 8 x i32> %vec, <8 x i32> %sv, i64 8)
; LMULMAX2-NEXT: vsetivli zero, 16, e8, m2, ta, ma
; LMULMAX2-NEXT: vslidedown.vi v10, v8, 16
; LMULMAX2-NEXT: vsetivli zero, 8, e8, m1, ta, ma
-; LMULMAX2-NEXT: vslidedown.vi v14, v10, 8
+; LMULMAX2-NEXT: vslidedown.vi v9, v10, 8
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vsext.vf4 v16, v14
-; LMULMAX2-NEXT: vsext.vf4 v14, v8
+; LMULMAX2-NEXT: vsext.vf4 v14, v9
+; LMULMAX2-NEXT: vsext.vf4 v16, v8
; LMULMAX2-NEXT: vsext.vf4 v8, v10
; LMULMAX2-NEXT: addi a0, a1, 64
; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: vse32.v v14, (a1)
+; LMULMAX2-NEXT: vse32.v v16, (a1)
; LMULMAX2-NEXT: addi a0, a1, 96
-; LMULMAX2-NEXT: vse32.v v16, (a0)
+; LMULMAX2-NEXT: vse32.v v14, (a0)
; LMULMAX2-NEXT: addi a0, a1, 32
; LMULMAX2-NEXT: vse32.v v12, (a0)
; LMULMAX2-NEXT: ret
; RV32-V128-LABEL: interleave_v2i64:
; RV32-V128: # %bb.0:
; RV32-V128-NEXT: vmv1r.v v12, v9
-; RV32-V128-NEXT: # kill: def $v8 killed $v8 def $v8m2
; RV32-V128-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-V128-NEXT: vid.v v10
-; RV32-V128-NEXT: vsrl.vi v14, v10, 1
+; RV32-V128-NEXT: vid.v v9
+; RV32-V128-NEXT: vsrl.vi v14, v9, 1
; RV32-V128-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32-V128-NEXT: vrgatherei16.vv v10, v8, v14
; RV32-V128-NEXT: li a0, 10
; RV32-V128-NEXT: vmv.s.x v0, a0
-; RV32-V128-NEXT: vrgatherei16.vv v10, v8, v14
; RV32-V128-NEXT: vrgatherei16.vv v10, v12, v14, v0.t
; RV32-V128-NEXT: vmv.v.v v8, v10
; RV32-V128-NEXT: ret
; RV64-V128-LABEL: interleave_v2i64:
; RV64-V128: # %bb.0:
; RV64-V128-NEXT: vmv1r.v v12, v9
-; RV64-V128-NEXT: # kill: def $v8 killed $v8 def $v8m2
; RV64-V128-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-V128-NEXT: vid.v v10
; RV64-V128-NEXT: vsrl.vi v14, v10, 1
+; RV64-V128-NEXT: vrgather.vv v10, v8, v14
; RV64-V128-NEXT: li a0, 10
; RV64-V128-NEXT: vmv.s.x v0, a0
-; RV64-V128-NEXT: vrgather.vv v10, v8, v14
; RV64-V128-NEXT: vrgather.vv v10, v12, v14, v0.t
; RV64-V128-NEXT: vmv.v.v v8, v10
; RV64-V128-NEXT: ret
; RV32-V128-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV32-V128-NEXT: vle32.v v0, (a0)
; RV32-V128-NEXT: vmv8r.v v24, v8
-; RV32-V128-NEXT: addi a0, sp, 16
-; RV32-V128-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-V128-NEXT: vrgather.vv v8, v24, v0
+; RV32-V128-NEXT: addi a0, sp, 16
+; RV32-V128-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; RV32-V128-NEXT: lui a0, %hi(.LCPI15_1)
; RV32-V128-NEXT: addi a0, a0, %lo(.LCPI15_1)
; RV32-V128-NEXT: vle32.v v24, (a0)
; RV64-V128-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV64-V128-NEXT: vle32.v v0, (a0)
; RV64-V128-NEXT: vmv8r.v v24, v8
-; RV64-V128-NEXT: addi a0, sp, 16
-; RV64-V128-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV64-V128-NEXT: vrgather.vv v8, v24, v0
+; RV64-V128-NEXT: addi a0, sp, 16
+; RV64-V128-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; RV64-V128-NEXT: lui a0, %hi(.LCPI15_1)
; RV64-V128-NEXT: addi a0, a0, %lo(.LCPI15_1)
; RV64-V128-NEXT: vle32.v v24, (a0)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 5
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8m2
; CHECK-NEXT: ret
entry:
%1 = shufflevector <16 x i16> %0, <16 x i16> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; RV64ZVE32F-NEXT: bgez a2, .LBB98_23
; RV64ZVE32F-NEXT: # %bb.22: # %cond.load37
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v13, v12, 1
-; RV64ZVE32F-NEXT: vmv.x.s a2, v13
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v12, 1
+; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lb a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: .LBB98_23: # %else38
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 49
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 2
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v12, 2
; RV64ZVE32F-NEXT: bltz a2, .LBB98_55
; RV64ZVE32F-NEXT: # %bb.24: # %else41
; RV64ZVE32F-NEXT: slli a2, a1, 48
; RV64ZVE32F-NEXT: bgez a2, .LBB98_28
; RV64ZVE32F-NEXT: .LBB98_27: # %cond.load49
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a2, v12
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lb a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: .LBB98_28: # %else50
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 45
-; RV64ZVE32F-NEXT: vslidedown.vi v13, v8, 2
+; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
; RV64ZVE32F-NEXT: bgez a2, .LBB98_30
; RV64ZVE32F-NEXT: # %bb.29: # %cond.load52
-; RV64ZVE32F-NEXT: vmv.x.s a2, v13
+; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lb a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: .LBB98_30: # %else53
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 44
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
; RV64ZVE32F-NEXT: bltz a2, .LBB98_58
; RV64ZVE32F-NEXT: # %bb.31: # %else56
; RV64ZVE32F-NEXT: slli a2, a1, 43
; RV64ZVE32F-NEXT: bgez a2, .LBB98_34
; RV64ZVE32F-NEXT: .LBB98_33: # %cond.load61
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v13, v12, 1
-; RV64ZVE32F-NEXT: vmv.x.s a2, v13
+; RV64ZVE32F-NEXT: vslidedown.vi v12, v9, 1
+; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lb a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v14, a2
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vsetivli zero, 22, e8, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 21
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 21
; RV64ZVE32F-NEXT: .LBB98_34: # %else62
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 8
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 41
-; RV64ZVE32F-NEXT: vslidedown.vi v9, v12, 2
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2
; RV64ZVE32F-NEXT: bltz a2, .LBB98_60
; RV64ZVE32F-NEXT: # %bb.35: # %else65
; RV64ZVE32F-NEXT: slli a2, a1, 40
; RV64ZVE32F-NEXT: bnez a2, .LBB98_14
; RV64ZVE32F-NEXT: j .LBB98_15
; RV64ZVE32F-NEXT: .LBB98_55: # %cond.load40
-; RV64ZVE32F-NEXT: vmv.x.s a2, v12
+; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lb a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v14, a2
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vsetivli zero, 15, e8, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 14
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 14
; RV64ZVE32F-NEXT: slli a2, a1, 48
; RV64ZVE32F-NEXT: bgez a2, .LBB98_25
; RV64ZVE32F-NEXT: .LBB98_56: # %cond.load43
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 1
-; RV64ZVE32F-NEXT: vmv.x.s a2, v12
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1
+; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lb a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: j .LBB98_28
; RV64ZVE32F-NEXT: .LBB98_58: # %cond.load55
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v13, v13, 1
-; RV64ZVE32F-NEXT: vmv.x.s a2, v13
+; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 1
+; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lb a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v14, a2
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vsetivli zero, 20, e8, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 19
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 19
; RV64ZVE32F-NEXT: slli a2, a1, 43
; RV64ZVE32F-NEXT: bgez a2, .LBB98_32
; RV64ZVE32F-NEXT: .LBB98_59: # %cond.load58
; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vmv.x.s a2, v12
+; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lb a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v14, a2
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vsetivli zero, 21, e8, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 20
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 20
; RV64ZVE32F-NEXT: slli a2, a1, 42
; RV64ZVE32F-NEXT: bltz a2, .LBB98_33
; RV64ZVE32F-NEXT: j .LBB98_34
; RV64ZVE32F-NEXT: bgez a2, .LBB92_23
; RV64ZVE32F-NEXT: # %bb.22: # %cond.store25
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v13, v12, 1
-; RV64ZVE32F-NEXT: vmv.x.s a2, v13
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v12, 1
+; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 13
; RV64ZVE32F-NEXT: .LBB92_23: # %else26
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 49
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 2
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v12, 2
; RV64ZVE32F-NEXT: bltz a2, .LBB92_55
; RV64ZVE32F-NEXT: # %bb.24: # %else28
; RV64ZVE32F-NEXT: slli a2, a1, 48
; RV64ZVE32F-NEXT: bgez a2, .LBB92_28
; RV64ZVE32F-NEXT: .LBB92_27: # %cond.store33
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 1
-; RV64ZVE32F-NEXT: vmv.x.s a2, v12
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1
+; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 17
; RV64ZVE32F-NEXT: .LBB92_28: # %else34
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 45
-; RV64ZVE32F-NEXT: vslidedown.vi v13, v10, 2
+; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 2
; RV64ZVE32F-NEXT: bgez a2, .LBB92_30
; RV64ZVE32F-NEXT: # %bb.29: # %cond.store35
-; RV64ZVE32F-NEXT: vmv.x.s a2, v13
+; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 18
; RV64ZVE32F-NEXT: .LBB92_30: # %else36
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 44
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 4
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
; RV64ZVE32F-NEXT: bltz a2, .LBB92_58
; RV64ZVE32F-NEXT: # %bb.31: # %else38
; RV64ZVE32F-NEXT: slli a2, a1, 43
; RV64ZVE32F-NEXT: bgez a2, .LBB92_34
; RV64ZVE32F-NEXT: .LBB92_33: # %cond.store41
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v13, v12, 1
-; RV64ZVE32F-NEXT: vmv.x.s a2, v13
+; RV64ZVE32F-NEXT: vslidedown.vi v12, v11, 1
+; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 21
-; RV64ZVE32F-NEXT: vse8.v v14, (a2)
+; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 21
+; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB92_34: # %else42
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 8
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 41
-; RV64ZVE32F-NEXT: vslidedown.vi v11, v12, 2
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 2
; RV64ZVE32F-NEXT: bltz a2, .LBB92_60
; RV64ZVE32F-NEXT: # %bb.35: # %else44
; RV64ZVE32F-NEXT: slli a2, a1, 40
; RV64ZVE32F-NEXT: bnez a2, .LBB92_14
; RV64ZVE32F-NEXT: j .LBB92_15
; RV64ZVE32F-NEXT: .LBB92_55: # %cond.store27
-; RV64ZVE32F-NEXT: vmv.x.s a2, v12
+; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 14
-; RV64ZVE32F-NEXT: vse8.v v14, (a2)
+; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 14
+; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: slli a2, a1, 48
; RV64ZVE32F-NEXT: bgez a2, .LBB92_25
; RV64ZVE32F-NEXT: .LBB92_56: # %cond.store29
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 1
-; RV64ZVE32F-NEXT: vmv.x.s a2, v12
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1
+; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 15
; RV64ZVE32F-NEXT: j .LBB92_28
; RV64ZVE32F-NEXT: .LBB92_58: # %cond.store37
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v13, v13, 1
-; RV64ZVE32F-NEXT: vmv.x.s a2, v13
+; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 1
+; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 19
-; RV64ZVE32F-NEXT: vse8.v v14, (a2)
+; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 19
+; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: slli a2, a1, 43
; RV64ZVE32F-NEXT: bgez a2, .LBB92_32
; RV64ZVE32F-NEXT: .LBB92_59: # %cond.store39
; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vmv.x.s a2, v12
+; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 20
-; RV64ZVE32F-NEXT: vse8.v v14, (a2)
+; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 20
+; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: slli a2, a1, 42
; RV64ZVE32F-NEXT: bltz a2, .LBB92_33
; RV64ZVE32F-NEXT: j .LBB92_34
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vfmv.s.f v24, fa0
+; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vfwadd.vv v0, v8, v16
+; CHECK-NEXT: vfwadd.vv v24, v8, v16
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfredusum.vs v8, v0, v24
+; CHECK-NEXT: vfredusum.vs v8, v24, v12
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <64 x half>, ptr %x
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vfmv.s.f v24, fa0
+; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vfwredosum.vs v8, v8, v24
+; CHECK-NEXT: vfwredosum.vs v8, v8, v12
; CHECK-NEXT: vfwredosum.vs v8, v16, v8
; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vsetivli zero, 16, e64, m1, ta, ma
-; CHECK-NEXT: vfmv.s.f v24, fa0
+; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vfwredosum.vs v8, v8, v24
+; CHECK-NEXT: vfwredosum.vs v8, v8, v12
; CHECK-NEXT: vfwredosum.vs v8, v16, v8
; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v24, zero
+; CHECK-NEXT: vmv.s.x v12, zero
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vwadd.vv v0, v8, v16
+; CHECK-NEXT: vwadd.vv v24, v8, v16
; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
-; CHECK-NEXT: vredsum.vs v8, v0, v24
+; CHECK-NEXT: vredsum.vs v8, v24, v12
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%v = load <128 x i8>, ptr %x
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v24, zero
+; CHECK-NEXT: vmv.s.x v12, zero
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vwaddu.vv v0, v8, v16
+; CHECK-NEXT: vwaddu.vv v24, v8, v16
; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
-; CHECK-NEXT: vredsum.vs v8, v0, v24
+; CHECK-NEXT: vredsum.vs v8, v24, v12
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%v = load <128 x i8>, ptr %x
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v24, zero
+; CHECK-NEXT: vmv.s.x v12, zero
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vwadd.vv v0, v8, v16
+; CHECK-NEXT: vwadd.vv v24, v8, v16
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vredsum.vs v8, v0, v24
+; CHECK-NEXT: vredsum.vs v8, v24, v12
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%v = load <64 x i16>, ptr %x
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v24, zero
+; CHECK-NEXT: vmv.s.x v12, zero
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vwaddu.vv v0, v8, v16
+; CHECK-NEXT: vwaddu.vv v24, v8, v16
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vredsum.vs v8, v0, v24
+; CHECK-NEXT: vredsum.vs v8, v24, v12
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%v = load <64 x i16>, ptr %x
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; RV32-NEXT: addi a1, a0, 128
; RV32-NEXT: li a2, 32
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vle32.v v8, (a0)
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vle32.v v16, (a1)
; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV32-NEXT: vslidedown.vi v24, v8, 16
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 24
-; RV32-NEXT: mul a0, a0, a1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v24, v16, 16
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v0, v16, 16
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 24
-; RV32-NEXT: mul a0, a0, a1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vmv4r.v v8, v0
; RV32-NEXT: vwadd.vv v0, v24, v8
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vwadd.vv v0, v8, v16
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vsrl.vx v8, v8, a2
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 5
+; RV32-NEXT: slli a2, a2, 4
; RV32-NEXT: add sp, sp, a2
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 5
+; RV64-NEXT: slli a1, a1, 4
; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; RV64-NEXT: addi a1, a0, 128
; RV64-NEXT: li a2, 32
; RV64-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV64-NEXT: vle32.v v8, (a0)
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV64-NEXT: vle32.v v16, (a1)
; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV64-NEXT: vslidedown.vi v24, v8, 16
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: li a1, 24
-; RV64-NEXT: mul a0, a0, a1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v24, v16, 16
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 3
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v0, v16, 16
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: li a1, 24
-; RV64-NEXT: mul a0, a0, a1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 3
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vmv4r.v v8, v0
; RV64-NEXT: vwadd.vv v0, v24, v8
; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 16
; RV64-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
; RV64-NEXT: vwadd.vv v0, v8, v16
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 16
; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV64-NEXT: vredsum.vs v8, v8, v16
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 5
+; RV64-NEXT: slli a1, a1, 4
; RV64-NEXT: add sp, sp, a1
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; RV32-NEXT: addi a1, a0, 128
; RV32-NEXT: li a2, 32
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vle32.v v8, (a0)
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vle32.v v16, (a1)
; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV32-NEXT: vslidedown.vi v24, v8, 16
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 24
-; RV32-NEXT: mul a0, a0, a1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v24, v16, 16
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v0, v16, 16
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 24
-; RV32-NEXT: mul a0, a0, a1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vmv4r.v v8, v0
; RV32-NEXT: vwaddu.vv v0, v24, v8
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vwaddu.vv v0, v8, v16
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vsrl.vx v8, v8, a2
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 5
+; RV32-NEXT: slli a2, a2, 4
; RV32-NEXT: add sp, sp, a2
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 5
+; RV64-NEXT: slli a1, a1, 4
; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; RV64-NEXT: addi a1, a0, 128
; RV64-NEXT: li a2, 32
; RV64-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV64-NEXT: vle32.v v8, (a0)
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV64-NEXT: vle32.v v16, (a1)
; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV64-NEXT: vslidedown.vi v24, v8, 16
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: li a1, 24
-; RV64-NEXT: mul a0, a0, a1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v24, v16, 16
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 3
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v0, v16, 16
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: li a1, 24
-; RV64-NEXT: mul a0, a0, a1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 3
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vmv4r.v v8, v0
; RV64-NEXT: vwaddu.vv v0, v24, v8
; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 16
; RV64-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
; RV64-NEXT: vwaddu.vv v0, v8, v16
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 16
; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV64-NEXT: vredsum.vs v8, v8, v16
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 5
+; RV64-NEXT: slli a1, a1, 4
; RV64-NEXT: add sp, sp, a1
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
define <vscale x 32 x half> @insert_nxv32f16_undef_nxv1f16_0(<vscale x 1 x half> %subvec) {
; CHECK-LABEL: insert_nxv32f16_undef_nxv1f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8m8
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vector.insert.nxv1f16.nxv32f16(<vscale x 32 x half> undef, <vscale x 1 x half> %subvec, i64 0)
ret <vscale x 32 x half> %v
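Inserting a subvector at offset 0 of an undef vector is free at the register level: the payload already sits in the low subregister of the wider register group, so the lowered body is just ret. Previously the output still carried a # kill comment widening v8's live range to the whole v8m8 group; with subregister liveness the partial definition is tracked directly and the marker disappears. A minimal standalone sketch of this test shape follows; the run line and function name are assumptions (flag spellings for the half-precision vector extension vary across LLVM versions), while the intrinsic is the one used by the test above.

; assumed run line, e.g.: llc -mtriple=riscv64 -mattr=+v,+zvfh < insert-lo.ll
define <vscale x 32 x half> @insert_lo(<vscale x 1 x half> %subvec) {
  ; No instructions are needed: %subvec already occupies the low nxv1f16
  ; subregister of the nxv32f16 (m8) result group.
  %v = call <vscale x 32 x half> @llvm.vector.insert.nxv1f16.nxv32f16(<vscale x 32 x half> undef, <vscale x 1 x half> %subvec, i64 0)
  ret <vscale x 32 x half> %v
}
declare <vscale x 32 x half> @llvm.vector.insert.nxv1f16.nxv32f16(<vscale x 32 x half>, <vscale x 1 x half>, i64)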
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a1
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vslideup.vx v22, v8, a0
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vslideup.vx v14, v8, a0
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vector.insert.nxv1f16.nxv32f16(<vscale x 32 x half> undef, <vscale x 1 x half> %subvec, i64 26)
ret <vscale x 32 x half> %v
; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 2
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: vl4r.v v4, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 3
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: li a1, 6
; SPILL-O2-NEXT: mul a0, a0, a1
; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 2
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: vl4r.v v4, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 3
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: li a1, 6
; SPILL-O2-NEXT: mul a0, a0, a1
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vrgather.vi v10, v8, 0
; CHECK-NEXT: vrgather.vi v10, v12, 0, v0.t
-; CHECK-NEXT: vrgather.vi v11, v8, 1
-; CHECK-NEXT: vrgather.vi v11, v12, 1, v0.t
+; CHECK-NEXT: vrgather.vi v9, v8, 1
+; CHECK-NEXT: vrgather.vi v9, v12, 1, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
-; CHECK-NEXT: vmv.v.v v9, v11
; CHECK-NEXT: ret
%retval = call {<2 x i64>, <2 x i64>} @llvm.experimental.vector.deinterleave2.v4i64(<4 x i64> %vec)
ret {<2 x i64>, <2 x i64>} %retval
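In the deinterleave hunk above, the <4 x i64> input occupies the v8m2 group (v8 and v9). Under whole-register liveness v9 stayed blocked until the whole group died, so field 1 was gathered into v11 and then copied into the second result register with vmv.v.v v9, v11; with subregister liveness, v9's half is known dead once the high half has been extracted into v12 (elided above), so the gather targets v9 directly and one whole-register copy goes away. A self-contained version of the test shape, with only the declare added around the call shown above:

define {<2 x i64>, <2 x i64>} @deinterleave_pair(<4 x i64> %vec) {
  ; Field 0 receives the even elements, field 1 the odd elements,
  ; lowered to the masked vrgather.vi sequence in the hunk above.
  %retval = call {<2 x i64>, <2 x i64>} @llvm.experimental.vector.deinterleave2.v4i64(<4 x i64> %vec)
  ret {<2 x i64>, <2 x i64>} %retval
}
declare {<2 x i64>, <2 x i64>} @llvm.experimental.vector.deinterleave2.v4i64(<4 x i64>)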
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vrgather.vi v10, v8, 0
; CHECK-NEXT: vrgather.vi v10, v12, 0, v0.t
-; CHECK-NEXT: vrgather.vi v11, v8, 1
-; CHECK-NEXT: vrgather.vi v11, v12, 1, v0.t
+; CHECK-NEXT: vrgather.vi v9, v8, 1
+; CHECK-NEXT: vrgather.vi v9, v12, 1, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
-; CHECK-NEXT: vmv.v.v v9, v11
; CHECK-NEXT: ret
%retval = call {<2 x double>, <2 x double>} @llvm.experimental.vector.deinterleave2.v4f64(<4 x double> %vec)
ret {<2 x double>, <2 x double>} %retval
; RV32-LABEL: vector_interleave_v4i64_v2i64:
; RV32: # %bb.0:
; RV32-NEXT: vmv1r.v v10, v9
-; RV32-NEXT: # kill: def $v8 killed $v8 def $v8m2
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vmv.v.i v12, 0
; RV32-NEXT: vsetivli zero, 2, e64, m2, tu, ma
; RV64-LABEL: vector_interleave_v4i64_v2i64:
; RV64: # %bb.0:
; RV64-NEXT: vmv1r.v v10, v9
-; RV64-NEXT: # kill: def $v8 killed $v8 def $v8m2
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vmv.v.i v12, 0
; RV64-NEXT: vsetivli zero, 2, e64, m2, tu, ma
; RV32-LABEL: vector_interleave_v4f64_v2f64:
; RV32: # %bb.0:
; RV32-NEXT: vmv1r.v v10, v9
-; RV32-NEXT: # kill: def $v8 killed $v8 def $v8m2
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vmv.v.i v12, 0
; RV32-NEXT: vsetivli zero, 2, e64, m2, tu, ma
; RV64-LABEL: vector_interleave_v4f64_v2f64:
; RV64: # %bb.0:
; RV64-NEXT: vmv1r.v v10, v9
-; RV64-NEXT: # kill: def $v8 killed $v8 def $v8m2
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vmv.v.i v12, 0
; RV64-NEXT: vsetivli zero, 2, e64, m2, tu, ma
define <vscale x 4 x i64> @vector_interleave_nxv4i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: vector_interleave_nxv4i64_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v10m2 killed $v10m2 killed $v8m4 def $v8m4
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vsrl.vi v16, v12, 1
; CHECK-NEXT: vadd.vx v16, v16, a0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m4 def $v8m4
; CHECK-NEXT: vrgatherei16.vv v12, v8, v16, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
define <vscale x 4 x double> @vector_interleave_nxv4f64_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: vector_interleave_nxv4f64_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v10m2 killed $v10m2 killed $v8m4 def $v8m4
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vsrl.vi v16, v12, 1
; CHECK-NEXT: vadd.vx v16, v16, a0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m4 def $v8m4
; CHECK-NEXT: vrgatherei16.vv v12, v8, v16, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
define <vscale x 32 x i16> @vfptosi_nxv32i16_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv32i16_nxv32f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v16, v0.t
+; CHECK-NEXT: vfncvt.rtz.x.f.w v28, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB25_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB25_2:
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v16, v0.t
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: vfncvt.rtz.x.f.w v24, v8, v0.t
+; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.fptosi.nxv32i16.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x i16> %v
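This is the pattern the change helps most. The source arrives as an m8 group in v8..v15, and the first masked vfncvt used to be assigned a destination inside that group (v12), which under whole-register liveness clobbered the whole group and forced an 8-register spill of v8 across the branch. Tracking subregisters lets the high half be produced in v28 instead, so v8m8 stays live; the entire frame (8 * vlenb bytes, the vs8r/vl8r pair, and the CFI bookkeeping) is deleted at the cost of one trailing vmv8r.v. At VLEN=128 that is 128 bytes of stack and two unknown-size memory ops removed per call.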
define <vscale x 32 x i16> @vfptoui_nxv32i16_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv32i16_nxv32f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v16, v0.t
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v28, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB25_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB25_2:
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v16, v0.t
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v24, v8, v0.t
+; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.fptoui.nxv32i16.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x i16> %v
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: li a3, 24
-; CHECK-NEXT: mul a1, a1, a3
+; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a3, a1, 2
; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v25, v0, a3
+; CHECK-NEXT: vslidedown.vx v16, v0, a3
; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
; CHECK-NEXT: slli a3, a1, 3
; CHECK-NEXT: add a3, a0, a3
-; CHECK-NEXT: vl8re64.v v8, (a3)
+; CHECK-NEXT: vl8re64.v v24, (a3)
; CHECK-NEXT: slli a3, a1, 1
; CHECK-NEXT: sub a4, a2, a3
; CHECK-NEXT: sltu a5, a2, a4
; CHECK-NEXT: addi a6, a6, -1
; CHECK-NEXT: and a6, a6, a5
; CHECK-NEXT: srli a5, a1, 3
-; CHECK-NEXT: vl8re64.v v16, (a0)
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vslidedown.vx v0, v25, a5
+; CHECK-NEXT: vl8re64.v v8, (a0)
+; CHECK-NEXT: vslidedown.vx v0, v16, a5
; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma
-; CHECK-NEXT: vfncvt.f.f.w v20, v8, v0.t
+; CHECK-NEXT: vfncvt.f.f.w v20, v24, v0.t
; CHECK-NEXT: bltu a4, a1, .LBB8_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a1
; CHECK-NEXT: .LBB8_2:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v26, v1, a5
+; CHECK-NEXT: vslidedown.vx v2, v1, a5
; CHECK-NEXT: vsetvli zero, a4, e32, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfncvt.f.f.w v16, v8, v0.t
; CHECK-NEXT: bltu a2, a3, .LBB8_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a0, a3, a0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v26
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfncvt.f.f.w v12, v24, v0.t
+; CHECK-NEXT: vmv1r.v v0, v2
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfncvt.f.f.w v28, v8, v0.t
; CHECK-NEXT: bltu a2, a1, .LBB8_6
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v1
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfncvt.f.f.w v8, v24, v0.t
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfncvt.f.f.w v24, v8, v0.t
+; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 24
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
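The segment-load tests all follow one scheme: vloxseg defines a register tuple (v7_v8 here) and the function returns a single field of it. With subregister liveness the allocator can start the tuple so that the wanted field already lands in the ABI result register v8, which removes the trailing # kill marker and, in the seg3+ cases below, the final vmv copy. A self-contained variant for reference; the function name and the extractvalue tail are assumptions, since each test's tail is elided, but extracting field 1 is what would leave the result in v8:

define <vscale x 1 x i8> @seg2_field1(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
entry:
  ; Both fields use %val as the pass-through; the trailing i32 1 is the
  ; tail/mask policy operand.
  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val, <vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
  ret <vscale x 1 x i8> %1
}
declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)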
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
define <vscale x 16 x i8> @test_vloxseg3_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
define <vscale x 16 x i8> @test_vloxseg4_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vloxseg4ei16.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i32> @test_vloxseg3_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i32> @test_vloxseg3_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i32> @test_vloxseg3_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i16> @test_vloxseg3_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i16> @test_vloxseg3_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x i16> @test_vloxseg4_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
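; With an e32 index, the index register pair v10_v11 overlaps the new
; destination tuple v7_v8_v9_v10, so it is evacuated to v12 with vmv2r before
; v10 is seeded with %val. Net effect: four vmv1r copies plus a copy-back
; become three vmv1r field copies plus one vmv2r index move.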
; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i32> @test_vloxseg3_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i32> @test_vloxseg3_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i32> @test_vloxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x i16> @test_vloxseg3_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x i16> @test_vloxseg3_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
define <vscale x 8 x i16> @test_vloxseg4_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
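; The same rewrite at LMUL=2: the destination tuple drops to v6_v8_v10_v12,
; and the m4 index group starting at v12 is moved up to v16 with vmv4r before
; v12 is seeded, so the index is still intact when the load issues.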
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x i8> @test_vloxseg3_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x i8> @test_vloxseg4_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei16.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 8 x i8> @test_vloxseg6_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vloxseg6ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
+; CHECK-NEXT: vloxseg6ei32.v v7, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
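; seg6 at LMUL=1 with an m4 index follows suit: the six-field tuple is rooted
; at v7 (v7..v12), and the index moves from v12 to v16 before v12 is seeded.
; The old form needed six copies into v16..v21 plus a copy-back from v17.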
; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
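; At LMUL=4 the copies are unchanged: a two-field m4 tuple v4m4_v8m4 was
; already rooted at v4 with the result in v8, so only the kill annotation
; disappears.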
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i8> @test_vloxseg3_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i8> @test_vloxseg3_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x i8> @test_vloxseg4_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i16> @test_vloxseg3_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i16> @test_vloxseg3_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i16> @test_vloxseg3_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i8> @test_vloxseg3_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i8> @test_vloxseg3_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i8> @test_vloxseg3_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i16> @test_vloxseg3_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i16> @test_vloxseg3_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i16> @test_vloxseg3_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i32> @test_vloxseg3_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i32> @test_vloxseg3_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i32> @test_vloxseg3_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x double> @test_vloxseg3_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x double> @test_vloxseg3_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x double> @test_vloxseg3_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x float> @test_vloxseg3_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x float> @test_vloxseg3_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x float> @test_vloxseg3_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x half> @test_vloxseg3_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x half> @test_vloxseg3_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x half> @test_vloxseg3_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x float> @test_vloxseg3_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x float> @test_vloxseg3_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x float> @test_vloxseg3_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x half> @test_vloxseg3_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x half> @test_vloxseg3_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
define <vscale x 8 x half> @test_vloxseg4_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x double> @test_vloxseg3_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x double> @test_vloxseg3_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x double> @test_vloxseg3_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x half> @test_vloxseg3_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x half> @test_vloxseg3_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x half> @test_vloxseg4_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
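The nxv4i32 variant above also shows the index vector being moved out of the way: the incoming index occupies the two-register group v10-v11 (e32/m2), which overlaps the tuple the allocator now forms at v7_v8_v9_v10, so the new code relocates it with vmv2r.v v12, v10 before the load. A standalone sketch with the same caveats as the earlier one (declare, extractvalue field, function name, and flags are reconstructed assumptions; +zvfh for the f16 element type is likewise an assumption):

declare {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)

define <vscale x 4 x half> @reduced4(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
entry:
  ; e16/m1 data with an e32/m2 index: the index group is twice as wide as
  ; each segment register, which is why it collides with the v7..v10 tuple.
  %0 = tail call {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
  %1 = extractvalue {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} %0, 1
  ret <vscale x 4 x half> %1
}

;   llc -mtriple=riscv32 -mattr=+v,+zvfh reduced4.ll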
; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x half> @test_vloxseg3_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x half> @test_vloxseg3_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x half> @test_vloxseg3_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x float> @test_vloxseg3_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x float> @test_vloxseg3_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x float> @test_vloxseg3_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i32> @test_vloxseg3_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i32> @test_vloxseg3_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i32> @test_vloxseg3_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
define <vscale x 4 x i32> @test_vloxseg4_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vloxseg4ei64.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
define <vscale x 16 x i8> @test_vloxseg3_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
define <vscale x 16 x i8> @test_vloxseg4_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vloxseg4ei16.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i64> @test_vloxseg3_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i64> @test_vloxseg3_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i64> @test_vloxseg3_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i64> @test_vloxseg3_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i32> @test_vloxseg3_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i32> @test_vloxseg3_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i32> @test_vloxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i32> @test_vloxseg3_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
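The # kill: lines deleted in this group of hunks never corresponded to an instruction: they are the assembly printer's rendering of liveness-only KILL pseudos, inserted so that returning v8m2 out of the v6m2_v8m2 tuple kept the register state consistent. With subregister lanes tracked, that bookkeeping is no longer needed, and the instructions actually emitted in these hunks are unchanged. A reduction for this case (the enclosing define is not shown in the excerpt, so its signature is reconstructed from the call above):

declare {<vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64)

define <vscale x 8 x i16> @reduced_vloxseg2(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
entry:
  %0 = tail call {<vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val, <vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
  ; v8m2 is field 1 of the v6m2_v8m2 tuple, so the return needs no copy
  %1 = extractvalue {<vscale x 8 x i16>, <vscale x 8 x i16>} %0, 1
  ret <vscale x 8 x i16> %1
}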
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i16> @test_vloxseg3_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i16> @test_vloxseg3_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i16> @test_vloxseg4_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
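This hunk shows the one wrinkle of overlapping the tuple with v8: the LMUL=4 index arrives in v12, which is also the last segment of the new v6_v8_v10_v12 allocation, so the index is first evacuated with a single vmv4r before v12 is overwritten with %val. That is still one copy instruction shorter than the old sequence, which kept the tuple clear of the index at v16..v22 and paid a vmv2r copy-back at the end. Reduced from the test above (field 1 again matches the old vmv2r.v v8, v18):

declare {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64)

define <vscale x 8 x i16> @reduced_vloxseg4(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
entry:
  ; the nxv8i32 index occupies an LMUL=4 register group and must not
  ; alias the segment tuple while the load executes
  %0 = tail call {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val, <vscale x 8 x i16> %val, <vscale x 8 x i16> %val, <vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} %0, 1
  ret <vscale x 8 x i16> %1
}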
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i8> @test_vloxseg3_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i8> @test_vloxseg3_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i8> @test_vloxseg4_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x i8> @test_vloxseg6_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
+; CHECK-NEXT: vloxseg6ei64.v v7, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i16> @test_vloxseg3_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i16> @test_vloxseg3_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i16> @test_vloxseg3_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i16> @test_vloxseg3_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i32> @test_vloxseg3_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i32> @test_vloxseg3_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i32> @test_vloxseg3_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 2 x i32> @test_vloxseg4_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
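When the index operand overlaps the chosen tuple, one extra whole-group move is needed first. In the nxv2i64-indexed vloxseg4 above, the EEW=64 index occupies the m2 group v10_v11, colliding with field 3 of the v7_v8_v9_v10 tuple, so it is relocated up front (editorial annotations):
; vmv1r.v v7, v8                     ; field 0 <- %val
; vmv1r.v v9, v8                     ; field 2 <- %val
; vmv2r.v v12, v10                   ; relocate the m2 index to v12_v13 before v10 is claimed
; vmv1r.v v10, v8                    ; field 3 <- %val
; vloxseg4ei64.v v7, (a0), v12, v0.t ; field 1 lands in v8
The old sequence instead used v12 through v15 for the tuple and paid a fifth vmv1r to copy field 1 back into v8.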
; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i8> @test_vloxseg3_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i8> @test_vloxseg4_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei16.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i8> @test_vloxseg6_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vloxseg6ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
+; CHECK-NEXT: vloxseg6ei32.v v7, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
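For the LMUL=4 nxv4i64 results above, the code was already copy-minimal: the two-field tuple lives in v4m4_v8m4, a single vmv4r fills field 0, and field 1 is the incoming v8m4 itself (editorial annotations):
; vmv4r.v v4, v8                     ; tuple field 0 <- %val (tuple is v4m4_v8m4)
; vloxseg2ei32.v v4, (a0), v12, v0.t ; field 1 lands in v8m4, already the result register
The only diff here is the disappearance of the # kill: annotation; KILL is a liveness marker that emits no instructions, so these hunks change the printed comments, not the generated code.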
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i16> @test_vloxseg3_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i16> @test_vloxseg3_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i16> @test_vloxseg4_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x i16> @test_vloxseg6_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg6ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
+; CHECK-NEXT: vloxseg6ei64.v v7, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i8> @test_vloxseg3_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i8> @test_vloxseg3_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i8> @test_vloxseg3_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 2 x i8> @test_vloxseg4_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i16> @test_vloxseg3_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i16> @test_vloxseg3_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i16> @test_vloxseg3_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 2 x i16> @test_vloxseg4_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i64> @test_vloxseg3_mask_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i64> @test_vloxseg3_mask_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i64> @test_vloxseg3_mask_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i64> @test_vloxseg3_mask_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x double> @test_vloxseg3_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x double> @test_vloxseg3_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x double> @test_vloxseg3_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x double> @test_vloxseg3_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x float> @test_vloxseg3_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x float> @test_vloxseg3_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x float> @test_vloxseg3_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 2 x float> @test_vloxseg4_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
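The nxv2f32/nxv2i64 hunk above is the one shape where a whole-register-group move shows up: the i64 index is LMUL=2 and arrives in the v10-v11 pair, which overlaps the new v7_v8_v9_v10 result tuple, so the allocator first parks the index at v12 with a single vmv2r.v. Two vmv1r.v copies are traded for that one vmv2r.v, still one move fewer overall than the old v12-based tuple. A sketch of this case, with the same caveat that the invocation and the repro_m2_index name are assumptions:

declare {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x float> @repro_m2_index(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
entry:
  ; The index elements are twice as wide as the f32 data, so the index
  ; occupies a register pair and must be relocated once the four-register
  ; result tuple claims v7..v10.
  %0 = tail call {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}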
; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x half> @test_vloxseg3_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x half> @test_vloxseg3_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x half> @test_vloxseg3_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x half> @test_vloxseg3_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x float> @test_vloxseg3_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x float> @test_vloxseg3_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x float> @test_vloxseg3_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x float> @test_vloxseg3_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x half> @test_vloxseg3_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x half> @test_vloxseg3_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x half> @test_vloxseg4_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x double> @test_vloxseg3_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x double> @test_vloxseg3_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x double> @test_vloxseg3_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x double> @test_vloxseg3_mask_nxv2f64_nxv2i64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x half> @test_vloxseg3_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x half> @test_vloxseg3_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x half> @test_vloxseg4_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x half> @test_vloxseg6_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg6ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
+; CHECK-NEXT: vloxseg6ei64.v v7, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x half> @test_vloxseg3_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x half> @test_vloxseg3_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x half> @test_vloxseg3_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 2 x half> @test_vloxseg4_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
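The # kill lines removed in the vloxseg2 hunks above are comments printed for KILL pseudo-instructions, which emit no machine code; once live ranges are tracked per subregister lane, the explicit kill of the full v6m2_v8m2 tuple is no longer needed when only the v8m2 half stays live, so the annotation disappears while the executed instructions are identical. A minimal sketch of the IR shape these tests exercise (the @sketch wrapper and the extractvalue index are inferred from the checked assembly, not taken from this patch; the intrinsic call is copied from the test above):

declare {<vscale x 4 x float>, <vscale x 4 x float>} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float>, <vscale x 4 x float>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)

define <vscale x 4 x float> @sketch(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
entry:
  ; Both passthru fields are %val; the destination tuple is allocated as v6m2_v8m2.
  %0 = tail call {<vscale x 4 x float>, <vscale x 4 x float>} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val, <vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  ; Returning field 1 means the result is already in v8m2, so no trailing copy is needed.
  %1 = extractvalue {<vscale x 4 x float>, <vscale x 4 x float>} %0, 1
  ret <vscale x 4 x float> %1
}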
define <vscale x 4 x float> @test_vloxseg3_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x float> @test_vloxseg3_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x float> @test_vloxseg3_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
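In the vloxseg3 hunks above, the old allocation copied the passthru into a disjoint tuple (v12, v14, v16, three vmv2r copies) and then copied field 1 back out to v8. With subregister liveness the tuple can overlap v8 as v6_v8_v10: one copy seeds v6, the index vector is moved out of the tuple's way, v10 is seeded from v8, and the trailing result copy disappears, one copy fewer overall (in the nxv4i64 variant, which already used the overlapping tuple, only the copy source and the kill comment change). To compare the two allocations locally, the subregister-liveness option can be toggled; a sketch of RUN lines under assumed flags (the real tests' -mattr and FileCheck prefixes may differ):

; RUN: llc -mtriple=riscv64 -mattr=+v < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-enable-subreg-liveness=false < %s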
; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
define <vscale x 4 x float> @test_vloxseg4_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vloxseg4ei64.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
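In the vloxseg4 nxv4i64 hunk just above, the LMUL=4 index vector initially sits in v12, which overlaps the desired destination tuple v6_v8_v10_v12, so it is first moved aside to v16 with vmv4r.v before the tuple is seeded from v8; field 1 of the result then lands directly in v8 and the old trailing vmv2r.v v8, v18 disappears.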
; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vlseg2e16.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e16.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vlseg2e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e8.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
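The vlseg hunks (from the nxv16i16 pair above through the end of the section) are unit-stride segment loads with undef passthru operands. The unmasked loads need no seeding copies, and the destination tuple (v7_v8, v6m2_v8m2, v4m4_v8m4, and so on) is chosen so that field 1 is returned in v8 directly; the masked variants keep their vmv copies, which reuse the first load's result as passthru. In every one of these hunks the only line removed is the kill comment, so the generated instructions are identical before and after. A minimal sketch of the IR shape, based on the nxv1i8 case above (the @sketch_vlseg2 wrapper and the extractvalue index are inferred from the checked assembly, not taken from the patch):

declare {<vscale x 1 x i8>, <vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, ptr, i32)

define <vscale x 1 x i8> @sketch_vlseg2(ptr %base, i32 %vl) {
entry:
  ; Undef passthrus: the v7_v8 tuple needs no seeding copies before the load.
  %0 = tail call {<vscale x 1 x i8>, <vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
  ; Field 1 is already in v8, so the function returns without a copy.
  %1 = extractvalue {<vscale x 1 x i8>, <vscale x 1 x i8>} %0, 1
  ret <vscale x 1 x i8> %1
}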
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vlseg3e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg3.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg3.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vlseg4e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg4.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg4.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vlseg5e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg5.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg5.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vlseg6e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg6.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg6.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vlseg7e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg7.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg7.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vlseg8e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vlseg2e8.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg2.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e8.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e8.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg2.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vlseg3e8.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg3.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e8.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg3.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vlseg4e8.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg4.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e8.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg4.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg2e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e32.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg3e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg3.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg3.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg4e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg4.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg4.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg5e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg5.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg5.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg6e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg6.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg6.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg7e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg7.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg7.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg8e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg8.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg8.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg2.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg2.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg3.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg3.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg4.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg4.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg5.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg5.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg6.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg6.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg7.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg7.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg8.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg8.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg2e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e32.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg3e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg4e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg5e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg6e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg7e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg8e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vlseg2e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg2.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e16.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg2.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vlseg3e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg3.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg3.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vlseg4e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg4.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg4.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vlseg2e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg2.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e8.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg2.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vlseg3e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg3.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg3.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vlseg4e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg4.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg4.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vlseg5e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg5.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg5.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vlseg6e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg6.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg6.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vlseg7e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg7.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg7.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vlseg8e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vlseg2e32.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e32.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vlseg2e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg2.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e8.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg2.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vlseg3e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg3.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg3.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vlseg4e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg4.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg4.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vlseg5e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg5.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg5.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vlseg6e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg6.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg6.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vlseg7e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg7.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg7.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vlseg8e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg2.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg2.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg3.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg3.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg4.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg4.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg5.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg5.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg6.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg6.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg7.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg7.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg8.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg8.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vlseg2e8.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlseg2.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e8.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e8.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlseg2.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vlseg2e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg2.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e8.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg2.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vlseg3e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg3.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg3.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vlseg4e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg4.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg4.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vlseg5e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg5.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg5.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vlseg6e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg6.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg6.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vlseg7e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg7.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg7.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vlseg8e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg2.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg2.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg3.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg3.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg4.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg4.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg5.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg5.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg6.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg6.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg7.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg7.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg8.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg8.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vlseg2e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e32.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vlseg3e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vlseg4e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg4.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg4.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vlseg2e16.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlseg2.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e16.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlseg2.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vlseg2e64.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlseg2.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e64.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e64.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlseg2.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vlseg2e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg2.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e64.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg2.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vlseg3e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg3.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg3.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vlseg4e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg4.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg4.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vlseg5e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg5.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg5.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vlseg6e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg6.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg6.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vlseg7e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg7.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg7.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vlseg8e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg8.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg8.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg2e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg2.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e32.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg2.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg3e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg3.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg3.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg4e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg4.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg4.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg5e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg5.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg5.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg6e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg6.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg6.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg7e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg8e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg8.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg8.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg2.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg2.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg3.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg3.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg8.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg8.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg2e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg2.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e32.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg2.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg3e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg3.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg3.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg4e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg4.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg4.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg5e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg5.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg5.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg6e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg6.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg6.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg7e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg7.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg7.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg8e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg8.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg8.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vlseg2e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg2.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e16.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg2.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vlseg3e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg3.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg3.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vlseg4e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg4.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg4.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vlseg2e32.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlseg2.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e32.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlseg2.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vlseg2e64.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg2.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e64.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e64.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg2.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vlseg3e64.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vlseg4e64.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg4.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg4.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg5.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg5.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg6.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg6.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg7.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg7.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vlseg2e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e32.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vlseg3e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vlseg4e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %vl)
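(From here on the hunks are evidently from the rv64 run of the same tests: the vl operand, and with it the intrinsic signature, switches from i32 to i64, while the codegen pattern and the dropped `# kill:` lines are otherwise identical.)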
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vlseg2e16.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e16.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vlseg2e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e32.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vlseg3e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vlseg4e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg4.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg4.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vlseg2e8.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg2.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e8.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e8.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg2.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vlseg3e8.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg3.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e8.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg3.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vlseg4e8.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg4.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e8.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg4.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vlseg2e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg2.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e64.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg2.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vlseg3e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg3.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg3.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vlseg4e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg4.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg4.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vlseg5e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg5.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg5.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vlseg6e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg6.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg6.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vlseg7e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg7.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg7.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vlseg8e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg8.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg8.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg2e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e32.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg3e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg4e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg5e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg6e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg7e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg8e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vlseg2e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg2.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e16.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg2.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vlseg3e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg3.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg3.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vlseg4e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg4.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg4.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vlseg2e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg2.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e8.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg2.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vlseg3e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg3.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg3.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vlseg4e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg4.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg4.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vlseg5e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg5.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg5.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vlseg6e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg6.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg6.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vlseg7e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg7.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg7.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vlseg8e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg2.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg2.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg3.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg3.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg4.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg4.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg5.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg5.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg6.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg6.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg7.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg7.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg8.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg8.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg2e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e32.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg3e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg3.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg3.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg4e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg4.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg4.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg5e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg5.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg5.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg6e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg6.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg6.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg7e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg7.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg7.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg8e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg8.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg8.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vlseg2e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg2.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e8.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg2.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vlseg3e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg3.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg3.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vlseg4e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg4.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg4.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vlseg5e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg5.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg5.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vlseg6e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg6.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg6.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vlseg7e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg7.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg7.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vlseg8e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vlseg2e64.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlseg2.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e64.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e64.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlseg2.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg2.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg2.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg3.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg3.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg4.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg4.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg5.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg5.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg6.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg6.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg7.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg7.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg8.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg8.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vlseg2e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e8.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vlseg3e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg3.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg3.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vlseg4e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg4.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg4.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vlseg5e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg5.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg5.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vlseg6e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg6.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg6.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vlseg7e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg7.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg7.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vlseg8e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vlseg2e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg2.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e8.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg2.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vlseg3e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg3.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg3.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vlseg4e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg4.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg4.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vlseg5e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg5.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg5.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vlseg6e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg6.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg6.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vlseg7e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg7.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg7.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vlseg8e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vlseg2e32.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e32.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vlseg2e8.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlseg2.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e8.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e8.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlseg2.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg2.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg2.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg3.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg3.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg4.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg4.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg5.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg5.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg6.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg6.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg7.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg7.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg8.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg8.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vlseg2e64.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg2.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e64.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e64.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg2.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vlseg3e64.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg3.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg3.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vlseg4e64.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg4.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg4.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vlseg2e16.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlseg2.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e16.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlseg2.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vlseg2e64.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlseg2.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e64.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e64.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlseg2.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vlseg2e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg2.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e64.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg2.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vlseg3e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg3.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg3.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vlseg4e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg4.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg4.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vlseg5e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg5.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg5.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vlseg6e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg6.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg6.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vlseg7e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg7.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg7.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vlseg8e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg8.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg8.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg2e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg2.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e32.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg2.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg3e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg3.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg3.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg4e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg4.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg4.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg5e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg5.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg5.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg6e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg6.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg6.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg7e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vlseg8e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg8.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg8.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg2.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg2.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg3.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg3.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg8.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg8.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg2e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg2.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e32.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg2.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg3e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg3.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg3.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg4e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg4.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg4.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg5e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg5.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg5.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg6e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg6.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg6.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg7e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg7.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg7.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vlseg8e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg8.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg8.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vlseg2e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg2.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e16.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg2.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vlseg3e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg3.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg3.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vlseg4e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg4.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg4.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vlseg2e32.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlseg2.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e32.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlseg2.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vlseg2e64.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg2.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e64.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e64.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg2.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vlseg3e64.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vlseg4e64.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg4.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg4.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg5.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg5.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg6.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg6.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg7.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg7.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vlseg2e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e32.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vlseg3e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vlseg4e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %vl)
define void @test_vlseg2ff_mask_dead_value(<vscale x 16 x i16> %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_dead_value:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vlseg2e16ff.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
define void @test_vlseg2ff_mask_dead_all(<vscale x 16 x i16> %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vlseg2ff_mask_dead_all:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t
; CHECK-NEXT: vlseg2e16ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
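The fault-only-first (vlseg2ff) tests additionally return the post-fault vl as a trailing i32, which is what the csrr a0, vl / sw a0, 0(a2) checks above read back and store to %outvl. A minimal sketch of how such a test consumes both results (the function name is illustrative; the intrinsic signature matches the calls above):

declare {<vscale x 16 x i16>, <vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.nxv16i16(
  <vscale x 16 x i16>, <vscale x 16 x i16>, ptr, i32)

define <vscale x 16 x i16> @use_both(ptr %base, i32 %vl, ptr %outvl) {
entry:
  %0 = tail call {<vscale x 16 x i16>, <vscale x 16 x i16>, i32}
      @llvm.riscv.vlseg2ff.nxv16i16(<vscale x 16 x i16> undef,
                                    <vscale x 16 x i16> undef, ptr %base, i32 %vl)
  ; field 2 is the number of elements actually processed before any fault
  %1 = extractvalue {<vscale x 16 x i16>, <vscale x 16 x i16>, i32} %0, 2
  store i32 %1, ptr %outvl
  %2 = extractvalue {<vscale x 16 x i16>, <vscale x 16 x i16>, i32} %0, 1
  ret <vscale x 16 x i16> %2
}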
; CHECK-NEXT: vlseg2e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg2ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg3ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg4ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg5ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg5ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg6ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg6ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg7ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg7ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg8ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg8ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e8ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e8ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e8ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg3ff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vlseg3e8ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e8ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg4ff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vlseg4e8ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg3ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg3ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg4ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg4ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg5ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg5ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg6ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg6ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg7ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg7ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg8ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg8ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg3ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg3ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
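Every masked hunk in this family shows the same rewrite: with subregister liveness enabled, the vmv1r.v copies that materialize the passthru tuple all read the incoming value in v8 directly instead of chaining through the freshly written v7, so the copies no longer depend on one another. A minimal standalone reproducer is sketched below against the vlseg3ff.mask.nxv4i16 intrinsic used by this test; the function name, RUN line, and argument order are illustrative assumptions, not lines taken from the test suite.

; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-enable-subreg-liveness=true < %s
declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg3ff.mask.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i32, i32)

define <vscale x 4 x i16> @repro_copy_chain(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, ptr %outvl) {
entry:
  ; All three passthru operands are %val (arriving in v8). The backend builds
  ; the v7_v8_v9 tuple with vmv1r.v copies; with subreg liveness those copies
  ; source v8 directly rather than the just-written v7.
  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg3ff.mask.nxv4i16(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 1
  %2 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 3
  store i32 %2, ptr %outvl
  ret <vscale x 4 x i16> %1
}

Running the same file with -riscv-enable-subreg-liveness=false should restore the chained copies, which is a quick way to compare the two modes side by side.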
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg4ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg4ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg5ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg5ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg6ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg6ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg7ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg7ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg8ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg8ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg2ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg3ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg3ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg4ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg4ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg5ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg5ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg6ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg6ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg7ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg7ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg8ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg8ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg3ff.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg3ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg4ff.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg4ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg3ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg4ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg5ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg5ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg6ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg6ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg7ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg7ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg8ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg8ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e32ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>, i32} @llvm.riscv.vlseg2ff.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg2ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg3ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg4ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg5ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg5ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg6ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg6ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg7ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg7ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg8ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg8ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg2ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg3ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg3ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg4ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg4ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg5ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg5ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg6ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg6ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg7ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg7ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg8ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg8ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e8ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e8ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg2ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg3ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg4ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg5ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg5ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg6ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg6ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg7ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg7ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg8ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg8ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg2ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg3ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg3ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg4ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg4ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg5ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg5ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg6ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg6ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg7ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg7ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg8ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg8ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg3ff.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg3ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg4ff.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg4ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e16ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>, i32} @llvm.riscv.vlseg2ff.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv16f16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e64ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>, i32} @llvm.riscv.vlseg2ff.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>, i32} @llvm.riscv.vlseg2ff.mask.nxv4f64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg2ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg2ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg3ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg3ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg4ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg4ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg5ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg5ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg6ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg6e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg6ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg7ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg7e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg7ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg8ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg8e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg8ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg2ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg2ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg3ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg3ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg4ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg4ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg5ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg5ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg6ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg6ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg7ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg7ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg8ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg8ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg2ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg3ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg3ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg4ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg4ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg5ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg5ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg6ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg6ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg7ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg7ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg8ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg8ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg2ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg2ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg3ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg3ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg4ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg4ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg5ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg5ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg6ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg6ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg7ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg7ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg8ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg8ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg2ff.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg3ff.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg3ff.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg4ff.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg4ff.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e32ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>, i32} @llvm.riscv.vlseg2ff.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>, i32} @llvm.riscv.vlseg2ff.mask.nxv8f32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e64ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg2ff.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg2ff.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e64ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg3ff.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg3ff.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e64ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg4ff.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg4ff.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg2ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg3ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg3ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg4ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg4ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg5ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg5ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg6ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg6ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg7ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg7ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg8ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg8ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg2ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg3ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg3ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg4ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg4ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg5ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg5ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg6ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg6ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg7ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg7ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg8ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg8ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg2ff.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %vl)
; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg2ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg3ff.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg3ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg4ff.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg4ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define void @test_vlseg2ff_mask_dead_value(<vscale x 16 x i16> %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_dead_value:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vlseg2e16ff.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
define void @test_vlseg2ff_mask_dead_all(<vscale x 16 x i16> %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vlseg2ff_mask_dead_all:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t
; CHECK-NEXT: vlseg2e16ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg2ff.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg3ff.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg3ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg4ff.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg4ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e8ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg2ff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e8ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e8ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg3ff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vlseg3e8ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg3ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e8ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg4ff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vlseg4e8ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg4ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg2ff.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg3ff.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg3ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg4ff.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg4ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg5ff.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg5ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg6ff.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg6e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg6ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg7ff.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg7e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg7ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg8ff.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg8e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg8ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg2ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg3ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg3ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg4ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg4ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg5ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg5ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg6ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg6ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg7ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg7ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg8ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg8ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg2ff.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg3ff.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg3ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg4ff.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg4ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg2ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg3ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg3ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg4ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg4ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg5ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg5ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg6ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg6ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg7ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg7ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg8ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg8ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg2ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg3ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg3ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg4ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg4ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg5ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg5ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg6ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg6ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg7ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg7ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg8ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg8ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg2ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg3ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg3ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg4ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg4ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg5ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg5ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg6ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg6ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg7ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg7ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg8ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg8ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg3ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg3ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg4ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg4ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg5ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg5ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg6ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg6ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg7ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg7ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg8ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg8ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e64ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} @llvm.riscv.vlseg2ff.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg2ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg3ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg3ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg4ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg4ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg5ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg5ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg6ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg6ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg7ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg7ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg8ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg8ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg2ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg3ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg3ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg4ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg4ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg5ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg5ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg6ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg6ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg7ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg7ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg8ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg8ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg2ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg3ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg3ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg4ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg4ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg5ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg5ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg6ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg6ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg7ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg7ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg8ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg8ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e32ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>, i64} @llvm.riscv.vlseg2ff.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e8ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} @llvm.riscv.vlseg2ff.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e8ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i1> %mask, i64 %vl, i64 1)
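The deleted "# kill:" lines in these LMUL=4 cases (and their m1/m2 counterparts elsewhere in the diff) are asm-printer comments for KILL pseudo-instructions. Without subregister liveness, the only def the machine verifier saw for the returned register was the whole segment tuple (e.g. $v4m4_v8m4), so a KILL had to be inserted before the return to carve $v8m4 out of it. A machine-level sketch of the old shape, with assumed pseudo-instruction names; only the KILL line is the point:

; Before (sketch, assumed opcode names):
;   $v4m4_v8m4 = PseudoVLSEG2E32FF_V_M4 ...                 ; defines the whole tuple
;   $v8m4 = KILL killed $v8m4, implicit killed $v4m4_v8m4   ; "# kill: ..." in asm
;   PseudoRET implicit $v8m4
; After: the v8m4 lane of the tuple is tracked precisely all the way to the
; return, so the KILL (and its comment) is no longer emitted.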
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg2ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg3ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg3ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg4ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg4ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg5ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg5ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg6ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg6ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg7ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg7ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg8ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg8ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e64ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg2ff.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e64ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg3ff.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg3ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e64ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg4ff.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg4ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
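All of these CHECK bodies are auto-generated FileCheck assertions, so the mechanical churn is expected. Either side of the diff can be reproduced by toggling the riscv-enable-subreg-liveness option this patch flips; the RUN lines here are an assumed sketch (the real ones sit outside the quoted hunks, and some files also need +zfh for the half-precision cases):

; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s
; Pre-patch codegen is still reachable by disabling the new default:
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN:   -riscv-enable-subreg-liveness=false < %s | FileCheck %s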
; CHECK-NEXT: vlseg2e16ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>, i64} @llvm.riscv.vlseg2ff.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>, i64} @llvm.riscv.vlseg2ff.mask.nxv16f16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e64ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>, i64} @llvm.riscv.vlseg2ff.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>, i64} @llvm.riscv.vlseg2ff.mask.nxv4f64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg2ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg2ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg3ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg3ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg4ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg4ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg5ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg5ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg6ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg6e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg6ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg7ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg7e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg7ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg8ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg8e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg8ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg2ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg2ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg3ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg3ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg4ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg4ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg5ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg5ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg6ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg6ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg7ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg7ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg8ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg8ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg2ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg2ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg3ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg3ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg4ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg4ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg5ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg5ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg6ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg6ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg7ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg7ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg8ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg8ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg2ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg2ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg3ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg3ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg4ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg4ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg5ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg5ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg6ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg6ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg7ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg7ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg8ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg8ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>, i64} @llvm.riscv.vlseg2ff.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>, i64} @llvm.riscv.vlseg2ff.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i64} @llvm.riscv.vlseg3ff.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i64} @llvm.riscv.vlseg3ff.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i64} @llvm.riscv.vlseg4ff.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i64} @llvm.riscv.vlseg4ff.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
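Two mechanical changes repeat through every hunk in this file: the chained vmv copies that seeded a segment tuple through its first element now read the original argument register directly, and the trailing # kill markers that extracted one live element from the tuple are gone. An annotated condensation of the pattern, with registers taken from the nxv8f16 case above (the "why" is an inference from the subregister-liveness change, not stated in the hunks themselves):

# before: each tuple element seeded through v6, a serial copy chain
vmv2r.v v6, v8
vmv2r.v v10, v6
vmv2r.v v12, v6
# after: v8 is known to still hold the same value after the first copy,
# so the later copies read it directly, breaking the dependence chain
vmv2r.v v6, v8
vmv2r.v v10, v8
vmv2r.v v12, v8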
; CHECK-NEXT: vlseg2e32ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>, i64} @llvm.riscv.vlseg2ff.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>, i64} @llvm.riscv.vlseg2ff.mask.nxv8f32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e64ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>, i64} @llvm.riscv.vlseg2ff.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>, i64} @llvm.riscv.vlseg2ff.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e64ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i64} @llvm.riscv.vlseg3ff.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i64} @llvm.riscv.vlseg3ff.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e64ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i64} @llvm.riscv.vlseg4ff.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i64} @llvm.riscv.vlseg4ff.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg2ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg2ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg3ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg3ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg4ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg4ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg5ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg5ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg6ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg6ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg7ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg7ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg8ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg8ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg2ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg2ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg3ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg3ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg4ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg4ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg5ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg5ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg6ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg6ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg7ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg7ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg8ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg8ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>, i64} @llvm.riscv.vlseg2ff.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %vl)
; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>, i64} @llvm.riscv.vlseg2ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i64} @llvm.riscv.vlseg3ff.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i64} @llvm.riscv.vlseg3ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i64} @llvm.riscv.vlseg4ff.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i64} @llvm.riscv.vlseg4ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
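The remaining hunks come from the strided segment-load (vlsseg) tests; the i32 %offset and i32 %vl operands mark these as the rv32 variants. The same reconstruction applies — a minimal sketch, with the define, its name, and the extractvalue inferred from the asm's register usage (a0 = %base, a1 = %offset, a2 = %vl, segment 1 returned in v8):

declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, ptr, i32, i32)

define <vscale x 1 x i8> @test_vlsseg2_nxv1i8(ptr %base, i32 %offset, i32 %vl) {
entry:
  ; passthru operands are undef: the plain (unmasked) form needs no tied input
  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
  ; the tuple is allocated to v7_v8; returning segment 1 leaves only v8 live,
  ; which per-lane liveness now tracks without the deleted "# kill" marker
  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
  ret <vscale x 1 x i8> %1
}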
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %offset, i32 %vl)
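; For LMUL=2 element types such as nxv8f16, the tuple occupies register-pair
; groups (v6m2_v8m2 in the CHECK lines above); a minimal sketch of the
; corresponding unmasked test, with the function name and extractvalue index
; assumed for illustration:
declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, ptr, i32, i32)

define <vscale x 8 x half> @test_vlsseg2_nxv8f16(ptr %base, i32 %offset, i32 %vl) {
entry:
  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %offset, i32 %vl)
  ; Field 1 of the pair v6m2_v8m2 lives in v8m2.
  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
  ret <vscale x 8 x half> %1
}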
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i64 %offset, i64 %vl)
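; Starting with the nxv16i16 case above, the stride and VL operands are i64
; rather than i32, i.e. these are the RV64 runs of the same tests. A minimal
; sketch (function name and extractvalue index assumed for illustration),
; here for the m4 register pair v4m4_v8m4:
declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, ptr, i64, i64)

define <vscale x 16 x i16> @test_vlsseg2_nxv16i16(ptr %base, i64 %offset, i64 %vl) {
entry:
  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i64 %offset, i64 %vl)
  ; Field 1 of the pair v4m4_v8m4 lives in v8m4.
  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
  ret <vscale x 16 x i16> %1
}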
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg3.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg3.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg4.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg4.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg5.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg5.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg6.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg6.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg7.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg7.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg8.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg8.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef, ptr %base, i64 %offset, i64 %vl)
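The same elision recurs at every register-group size: with LMUL=4 the segment load above defines the tuple $v4m4_v8m4, the masked variant seeds its tied passthru operands with whole-group vmv4r.v copies, and the dropped marker was the m4 form covering $v8m4. Because these hunks change nothing but such auto-generated assertions, the natural way to regenerate them after flipping the default is LLVM's check updater, along the lines of the following (the test path is assumed from the intrinsic names; pass --llc-binary if llc is not on PATH):

llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll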
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg2.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg2.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg3.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg3.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg4.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg4.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vluxseg4ei16.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei16.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
+; CHECK-NEXT: vluxseg6ei32.v v7, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vluxseg4ei64.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vluxseg4ei16.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
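A reduced reproducer for the codegen deltas in this file (a hedged sketch, not part of the patch: the function name @reduced and the trailing extractvalue are illustrative, while the intrinsic signature is the one already used by the tests above). Compiling it with `llc -mtriple=riscv64 -mattr=+v` at this commit should yield the `+` form of the checks, and passing `-riscv-enable-subreg-liveness=false` should restore the `-` form, including the redundant whole-register copies and the `# kill:` comment:

declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)

define <vscale x 4 x i32> @reduced(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
entry:
  ; With subregister liveness, the allocator can place the two-segment tuple
  ; at v6m2_v8m2, overlapping the still-live passthru in v8, so a single
  ; vmv2r.v of the passthru suffices and no kill comment is emitted for the
  ; returned second segment.
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}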
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i64> @test_vluxseg3_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i64> @test_vluxseg3_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i64> @test_vluxseg3_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i64> @test_vluxseg3_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
+; CHECK-NEXT: vluxseg6ei64.v v7, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
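The deleted "# kill:" lines in these vluxseg2/vluxseg3 hunks were never executable instructions: when the whole tuple was allocated as one unit, extracting v8 from v7_v8 left a no-op kill marker that the assembly printer emits as a comment, and update_llc_test_checks dutifully captured it. With subregister liveness the v8 lane of the tuple is live on its own, so the extraction needs no bookkeeping at all. The removed lines are exactly what a check under the flipped flag would match; a sketch with a hypothetical NOSUBREG prefix, taken from the nxv2i64-indexed case above:
; NOSUBREG:      vluxseg2ei64.v v7, (a0), v10, v0.t
; NOSUBREG-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; NOSUBREG-NEXT: ret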
define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei16.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
+; CHECK-NEXT: vluxseg6ei32.v v7, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg6ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
+; CHECK-NEXT: vluxseg6ei64.v v7, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
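The dropped "# kill:" comments in these two-field cases are the same effect in its simplest form: v8 is now tracked as a live subregister of the v7_v8 group, so no extra kill marker is needed when the second element is returned in place. A minimal sketch of the pattern, reusing the call above (function name and extracted element are assumptions):

declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64(<vscale x 2 x i16>, <vscale x 2 x i16>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i16> @repro_vluxseg2_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
entry:
  %r = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  ; assumption: returning element 1, i.e. the half of the v7_v8 group that is v8
  %elt = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %r, 1
  ret <vscale x 2 x i16> %elt
}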
define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i64> @test_vluxseg3_mask_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i64> @test_vluxseg3_mask_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i64> @test_vluxseg3_mask_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i64> @test_vluxseg3_mask_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
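The same re-allocation happens at LMUL=2, just with register pairs: the three-field tuple is now assigned to v6m2_v8m2_v10m2 so the returned pair is already v8m2, the m2 index vector moves from v10 to v12 to make room, and the trailing vmv2r.v v8, v14 result copy goes away. A sketch mirroring the call above (names and the extracted element are assumptions, as before):

declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i64> @repro_vluxseg3_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
entry:
  %r = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64(<vscale x 2 x i64> %val, <vscale x 2 x i64> %val, <vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  ; assumption: element 1 is returned, matching the v8m2 slot of the new tuple
  %elt = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %r, 1
  ret <vscale x 2 x i64> %elt
}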
; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
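Each hunk above elides the test's trailing extractvalue/ret. For orientation, here is a sketch of what one complete test of this shape looks like; it mirrors test_vluxseg3_mask_nxv2f32_nxv2i32, but the declare line and the extractvalue of field 1 are inferred from the call site and from the old output's closing vmv1r.v v8, v11, not quoted from the patch:

declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x float> @sketch_vluxseg3_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
entry:
  ; masked 3-segment indexed load; all three passthru operands are %val
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  ; field 1 (the second segment) is what the test returns
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}

Tracking liveness per subregister lets the allocator place the segment tuple at v7_v8_v9, so field 1 is produced directly in the v8 return register: the index is moved aside (v9 to v10), the two remaining passthru copies are made, and the closing vmv1r.v v8, v11 copy disappears. The # kill: def $v8 ... annotations removed in the vluxseg2 hunks, which marked v8's extraction from the tuple, likewise become unnecessary.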
define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
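The nxv8f16/nxv8i32 case above is where the overlap gets interesting: the m4 index range (v12m4) collides with the destination tuple v6m2_v8m2_v10m2_v12m2. An annotated copy of the new sequence follows; the comments are our reading of the register assignment, not part of the generated test:

vmv2r.v v6, v8                     # seg0 passthru; tuple allocated at v6m2_v8m2_v10m2_v12m2
vmv2r.v v10, v8                    # seg2 passthru (v8 already holds %val as seg1)
vmv4r.v v16, v12                   # move the m4 index clear of the tuple before v12 is clobbered
vmv2r.v v12, v8                    # seg3 passthru
vsetvli zero, a1, e16, m2, ta, mu
vluxseg4ei32.v v6, (a0), v16, v0.t # seg1 lands directly in the v8 return register

The old allocation needed four passthru copies plus a fifth vmv2r.v v8, v18 to move the result out; the new one issues four moves (one of them the m4 index move) and returns seg1 in place.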
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg6ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
+; CHECK-NEXT: vluxseg6ei64.v v7, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vluxseg4ei64.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; RV32-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
; RV32-NEXT: vslidedown.vx v0, v0, a3
; RV32-NEXT: vsetvli a3, zero, e32, m8, ta, ma
-; RV32-NEXT: vsext.vf4 v24, v10
+; RV32-NEXT: vsext.vf4 v16, v10
; RV32-NEXT: vsetvli zero, a4, e8, m2, ta, ma
-; RV32-NEXT: vluxei32.v v18, (a0), v24, v0.t
+; RV32-NEXT: vluxei32.v v10, (a0), v16, v0.t
; RV32-NEXT: bltu a1, a2, .LBB12_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB12_2:
; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
-; RV32-NEXT: vsext.vf4 v24, v8
+; RV32-NEXT: vsext.vf4 v16, v8
; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; RV32-NEXT: vmv1r.v v0, v12
-; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
-; RV32-NEXT: vmv4r.v v8, v16
+; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_nxv32i8:
; RV64-NEXT: sltu a5, a1, a3
; RV64-NEXT: addi a5, a5, -1
; RV64-NEXT: and a3, a5, a3
-; RV64-NEXT: vmv1r.v v17, v0
+; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: mv a5, a3
; RV64-NEXT: bltu a3, a2, .LBB12_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: .LBB12_2:
; RV64-NEXT: srli a6, a2, 2
; RV64-NEXT: vsetvli a7, zero, e8, mf2, ta, ma
-; RV64-NEXT: vslidedown.vx v16, v17, a6
+; RV64-NEXT: vslidedown.vx v12, v13, a6
; RV64-NEXT: vsetvli a6, zero, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf8 v24, v10
+; RV64-NEXT: vsext.vf8 v16, v10
; RV64-NEXT: vsetvli zero, a5, e8, m1, ta, ma
-; RV64-NEXT: vmv1r.v v0, v16
-; RV64-NEXT: vluxei64.v v14, (a0), v24, v0.t
+; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t
; RV64-NEXT: bltu a1, a4, .LBB12_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: mv a1, a4
; RV64-NEXT: and a5, a5, a4
; RV64-NEXT: srli a4, a2, 3
; RV64-NEXT: vsetvli a6, zero, e8, mf4, ta, ma
-; RV64-NEXT: vslidedown.vx v0, v17, a4
+; RV64-NEXT: vslidedown.vx v0, v13, a4
; RV64-NEXT: vsetvli a6, zero, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf8 v24, v9
+; RV64-NEXT: vsext.vf8 v16, v9
; RV64-NEXT: vsetvli zero, a5, e8, m1, ta, ma
-; RV64-NEXT: vluxei64.v v13, (a0), v24, v0.t
+; RV64-NEXT: vluxei64.v v9, (a0), v16, v0.t
; RV64-NEXT: bltu a1, a2, .LBB12_6
; RV64-NEXT: # %bb.5:
; RV64-NEXT: mv a1, a2
; RV64-NEXT: .LBB12_6:
; RV64-NEXT: vsetvli a5, zero, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf8 v24, v8
+; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; RV64-NEXT: vmv1r.v v0, v17
-; RV64-NEXT: vluxei64.v v12, (a0), v24, v0.t
+; RV64-NEXT: vmv1r.v v0, v13
+; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: sub a1, a3, a2
; RV64-NEXT: sltu a2, a3, a1
; RV64-NEXT: addi a2, a2, -1
; RV64-NEXT: and a1, a2, a1
; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
-; RV64-NEXT: vslidedown.vx v0, v16, a4
+; RV64-NEXT: vslidedown.vx v0, v12, a4
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v11
; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; RV64-NEXT: vluxei64.v v15, (a0), v16, v0.t
-; RV64-NEXT: vmv4r.v v8, v12
+; RV64-NEXT: vluxei64.v v11, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i8, ptr %base, <vscale x 32 x i8> %idxs
%v = call <vscale x 32 x i8> @llvm.vp.gather.nxv32i8.nxv32p0(<vscale x 32 x ptr> %ptrs, <vscale x 32 x i1> %m, i32 %evl)
define internal void @foo(<vscale x 1 x i16> %v15, <vscale x 1 x i16> %0, <vscale x 1 x i16> %vs12.i.i.i, <vscale x 1 x i16> %1, <vscale x 8 x i8> %v37) {
; NOSUBREG-LABEL: foo:
; NOSUBREG: # %bb.0: # %loopIR.preheader.i.i
-; NOSUBREG-NEXT: # kill: def $v10 killed $v10 def $v10m2
; NOSUBREG-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; NOSUBREG-NEXT: vmv.v.i v14, 0
; NOSUBREG-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT: vsetvli a0, a0, e64, m1, ta, ma
; CHECK-NEXT: vfadd.vv v9, v8, v9
; CHECK-NEXT: vfmul.vv v8, v9, v8
+; CHECK-NEXT: # implicit-def: $x10
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB2_2: # %if.else
; CHECK-NEXT: vsetvli a0, a0, e64, m1, ta, ma
; CHECK-NEXT: vfsub.vv v9, v8, v9
; CHECK-NEXT: vfmul.vv v8, v9, v8
+; CHECK-NEXT: # implicit-def: $x10
; CHECK-NEXT: ret
entry:
%tobool = icmp eq i8 %cond, 0
define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsitofp_nxv32f16_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT: vfncvt.f.x.w v12, v16, v0.t
+; CHECK-NEXT: vfncvt.f.x.w v28, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB25_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB25_2:
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfncvt.f.x.w v8, v16, v0.t
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: vfncvt.f.x.w v24, v8, v0.t
+; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.sitofp.nxv32f16.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x half> %v
define void @test_vsoxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsoxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsoxseg2_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsoxseg2_mask_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsoxseg2_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsoxseg2_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsoxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsoxseg2_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsoxseg2_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsoxseg2_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsoxseg2_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
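From here on, the vsoxseg diffs follow two recurring shapes. First, the `# kill:` implicit-def annotations on v8 disappear: with liveness tracked per lane of the segment tuple, v8 is no longer treated as killed by the first vmv1r.v into the tuple, so in the longer segment cases every remaining lane copy reads the original v8 instead of chaining through v10, removing the serial dependence between the copies. To compare both schedules from one input, a sketch along these lines should work, assuming llc still accepts the internal option this commit flips:

; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-enable-subreg-liveness=false < %s \
; RUN:   | FileCheck %s --check-prefix=NOSUBREG
; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-enable-subreg-liveness=true < %s \
; RUN:   | FileCheck %s --check-prefix=SUBREG
; (Triple and attributes are assumptions for illustration.)

declare void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, ptr, <vscale x 1 x i8>, i32)

define void @repro_seg4(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
entry:
  tail call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8(<vscale x 1 x i8> %val, <vscale x 1 x i8> %val, <vscale x 1 x i8> %val, <vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
  ret void
}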
define void @test_vsoxseg2_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t
define void @test_vsoxseg2_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
define void @test_vsoxseg2_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
define void @test_vsoxseg2_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsoxseg3_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vsoxseg3ei16.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl)
define void @test_vsoxseg3_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vsoxseg3ei16.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
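The second recurring shape is visible in the two hunks above: where the index register overlaps the tuple's natural location, the allocator now keeps the value tuple rooted at v8/v10/v12 and moves the m4 index aside with one vmv4r.v, instead of relocating the whole tuple to v16/v18/v20 through a chain of dependent m2 copies. A reduced test for this case, reusing the RUN configuration sketched above:

declare void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, ptr, <vscale x 16 x i16>, i32)

define void @repro_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
entry:
  tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16(<vscale x 16 x i8> %val, <vscale x 16 x i8> %val, <vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl)
  ret void
}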
; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
define void @test_vsoxseg3_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg4_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsoxseg4_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsoxseg2_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsoxseg2_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsoxseg2_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsoxseg2_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsoxseg2_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsoxseg2_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsoxseg2_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsoxseg2_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsoxseg2_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsoxseg2_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
define void @test_vsoxseg3_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
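The same index-relocation rewrite recurs here at m1 granularity with an m2 index: the tuple stays at v8/v9/v10 and the index moves from v10 to v12. The matching reduced test, again under the RUN configuration assumed above:

declare void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, ptr, <vscale x 4 x i32>, i32)

define void @repro_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
entry:
  tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
  ret void
}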
; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsoxseg2_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsoxseg2_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsoxseg2_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsoxseg2_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsoxseg2_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsoxseg2_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t
; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
define void @test_vsoxseg3_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t
define void @test_vsoxseg2_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
define void @test_vsoxseg2_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
define void @test_vsoxseg2_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t
define void @test_vsoxseg3_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl)
define void @test_vsoxseg3_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
define void @test_vsoxseg3_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg4_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg4_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg5_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vsoxseg5ei32.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
define void @test_vsoxseg5_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vsoxseg5ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_mask_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_mask_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_mask_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
define void @test_vsoxseg2_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
define void @test_vsoxseg2_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
define void @test_vsoxseg3_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv32i8_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv32i8_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t
define void @test_vsoxseg2_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
define void @test_vsoxseg2_mask_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
define void @test_vsoxseg2_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsoxseg2_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsoxseg2_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsoxseg2_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsoxseg2_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsoxseg2_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsoxseg2_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsoxseg2_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsoxseg2_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsoxseg2_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsoxseg2_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsoxseg2_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsoxseg2_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv16f16_nxv16i16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsoxseg2_mask_nxv16f16_nxv16i16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsoxseg2_nxv16f16_nxv16i8(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsoxseg2_mask_nxv16f16_nxv16i8(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsoxseg2_nxv16f16_nxv16i32(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv16f16_nxv16i32(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsoxseg2_nxv4f64_nxv4i16(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsoxseg2_mask_nxv4f64_nxv4i16(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsoxseg2_nxv4f64_nxv4i8(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsoxseg2_mask_nxv4f64_nxv4i8(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsoxseg2_nxv4f64_nxv4i32(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsoxseg2_mask_nxv4f64_nxv4i32(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsoxseg2_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsoxseg2_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsoxseg2_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsoxseg2_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsoxseg2_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsoxseg2_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsoxseg2_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsoxseg2_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsoxseg2_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsoxseg2_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsoxseg2_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsoxseg2_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsoxseg2_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsoxseg2_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsoxseg2_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsoxseg2_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsoxseg2_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t
; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
define void @test_vsoxseg3_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv8f32_nxv8i16(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_mask_nxv8f32_nxv8i16(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_nxv8f32_nxv8i8(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_mask_nxv8f32_nxv8i8(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_nxv8f32_nxv8i32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_mask_nxv8f32_nxv8i32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsoxseg2_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsoxseg2_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsoxseg2_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsoxseg2_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsoxseg2_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsoxseg2_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsoxseg2_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsoxseg2_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsoxseg2_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
define void @test_vsoxseg3_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsoxseg2_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsoxseg2_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsoxseg2_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsoxseg2_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsoxseg2_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsoxseg2_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsoxseg2_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsoxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsoxseg2_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsoxseg2_mask_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsoxseg2_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsoxseg2_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsoxseg2_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsoxseg2_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsoxseg2_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsoxseg2_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t
define void @test_vsoxseg2_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsoxseg2_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vsoxseg3ei64.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i64(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, <vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vsoxseg3ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, <vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t
define void @test_vsoxseg2_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
define void @test_vsoxseg2_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
define void @test_vsoxseg2_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsoxseg3_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vsoxseg3ei16.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16(<vscale x 16 x i8> %val, <vscale x 16 x i8> %val, <vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vsoxseg3ei16.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val, <vscale x 16 x i8> %val, <vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
define void @test_vsoxseg3_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg4_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsoxseg4_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsoxseg2_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsoxseg2_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsoxseg2_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsoxseg2_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsoxseg2_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsoxseg2_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsoxseg2_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsoxseg2_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
-; CHECK-NEXT: ret
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
+; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
ret void
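For reference, each of these autogenerated cases reduces to a single intrinsic call like the one above. A self-contained version of such a test — a minimal sketch, where the function name @vsoxseg7_sketch is illustrative and the intrinsic signature is copied from the call above — looks like:

declare void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, ptr, <vscale x 1 x i8>, i64)

define void @vsoxseg7_sketch(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
entry:
  ; Store seven copies of %val as one indexed (ordered) segment store.
  tail call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8(<vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
  ret void
}

The CHECK lines themselves are the kind of output one regenerates with llvm/utils/update_llc_test_checks.py rather than writes by hand.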
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
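(The deleted "# kill: def $v8 ..." lines are not instructions: llc prints them for KILL pseudos that the register allocator inserted to model liveness transitions within register tuples such as $v8_v9. With per-subregister liveness tracked precisely, these markers become unnecessary and drop out of the expected output.)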
define void @test_vsoxseg2_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsoxseg2_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsoxseg2_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsoxseg2_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: ret
-entry:
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
ret void
}
; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsoxseg2_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsoxseg2_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsoxseg2_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsoxseg2_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t
define void @test_vsoxseg2_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t
; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsoxseg3_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsoxseg3_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
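(The nxv8i16/nxv8i32 cases above show a second effect. Before, the tuple was built away from the argument registers — three vmv2r copies into v16/v18/v20 — with the m4 index left in v12. Now the allocator relocates the index first (vmv4r.v v16, v12) and completes the tuple in place as v8/v10/v12, reusing the incoming v8 as the first segment, so only two of the three segments need copies. Overlapping the tuple with the still-live value in v8 is, presumably, only provably safe once subregister liveness is tracked.)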
; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg4_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsoxseg4_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t
define void @test_vsoxseg2_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
define void @test_vsoxseg2_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t
define void @test_vsoxseg2_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
define void @test_vsoxseg3_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
define void @test_vsoxseg3_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg4_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg4_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg5_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
define void @test_vsoxseg5_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
ret void
}
; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsoxseg2_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsoxseg2_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsoxseg2_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsoxseg2_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsoxseg2_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsoxseg2_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t
define void @test_vsoxseg2_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
define void @test_vsoxseg2_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
define void @test_vsoxseg2_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t
define void @test_vsoxseg2_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t
define void @test_vsoxseg3_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
define void @test_vsoxseg3_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
define void @test_vsoxseg3_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
define void @test_vsoxseg3_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg4_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg4_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg4_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg4_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg5_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg5_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg5_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vsoxseg5ei32.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
define void @test_vsoxseg5_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vsoxseg5ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg6_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg6_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg7_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg7_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg8_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg8_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv4i64_nxv4i32(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsoxseg2_mask_nxv4i64_nxv4i32(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsoxseg2_nxv4i64_nxv4i8(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsoxseg2_mask_nxv4i64_nxv4i8(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsoxseg2_nxv4i64_nxv4i64(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsoxseg2_mask_nxv4i64_nxv4i64(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsoxseg2_nxv4i64_nxv4i16(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsoxseg2_mask_nxv4i64_nxv4i16(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsoxseg2_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t
define void @test_vsoxseg2_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsoxseg2_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsoxseg2_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t
define void @test_vsoxseg2_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsoxseg2_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsoxseg3_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsoxseg3_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg4_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg4_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg5_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
define void @test_vsoxseg5_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
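The nxv4i16 hunks above, and the nxv1i8/nxv2i8 hunks that follow, all show the same two effects of the new default. First, the per-segment copies that materialize the operand tuple now all read the original value in v8 directly (vmv1r.v v11, v8) rather than chaining through the first copy (vmv1r.v v11, v10), and the # kill annotations for implicitly defined tuple registers disappear. Second, where the tuple can now be allocated starting at v8 itself, the index operand is evacuated out of the way first (the vmv2r.v v12, v10 and vmv4r.v v16, v12 lines) so the segment store reads the tuple in place. Both follow from tracking the liveness of each lane of a tuple such as v8_v9_v10 individually. The option toggled by this patch is consulted through the generic subtarget hook; as a minimal sketch (the exact body in RISCVSubtarget.cpp is an assumption for illustration, not part of this diff):

bool RISCVSubtarget::enableSubRegLiveness() const {
  // When true, liveness is tracked per v8, v9, ... lane inside grouped
  // registers and register tuples, rather than per whole tuple.
  return EnableSubRegLiveness;
}
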
define void @test_vsoxseg2_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsoxseg2_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsoxseg2_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsoxseg2_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsoxseg2_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsoxseg2_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsoxseg2_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsoxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsoxseg2_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsoxseg2_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsoxseg2_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsoxseg2_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsoxseg2_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsoxseg2_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_mask_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_mask_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_nxv8i32_nxv8i64(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv8i32_nxv8i64(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t
define void @test_vsoxseg2_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_mask_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv32i8_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv32i8_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t
define void @test_vsoxseg2_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
define void @test_vsoxseg2_mask_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
define void @test_vsoxseg2_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsoxseg2_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsoxseg2_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsoxseg2_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsoxseg2_mask_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsoxseg2_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsoxseg2_mask_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsoxseg2_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsoxseg2_mask_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsoxseg2_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsoxseg2_mask_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv16f16_nxv16i16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsoxseg2_mask_nxv16f16_nxv16i16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsoxseg2_nxv16f16_nxv16i8(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsoxseg2_mask_nxv16f16_nxv16i8(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsoxseg2_nxv16f16_nxv16i32(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv16f16_nxv16i32(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsoxseg2_nxv4f64_nxv4i32(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsoxseg2_mask_nxv4f64_nxv4i32(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsoxseg2_nxv4f64_nxv4i8(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsoxseg2_mask_nxv4f64_nxv4i8(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsoxseg2_nxv4f64_nxv4i64(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsoxseg2_mask_nxv4f64_nxv4i64(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsoxseg2_nxv4f64_nxv4i16(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsoxseg2_mask_nxv4f64_nxv4i16(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsoxseg2_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsoxseg2_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsoxseg2_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsoxseg2_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsoxseg2_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsoxseg2_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsoxseg2_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsoxseg2_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsoxseg2_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsoxseg2_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsoxseg2_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsoxseg2_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsoxseg2_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsoxseg2_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
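Putting the masked call above into a self-contained form, as a sketch: the declare is reconstructed from the argument types visible in the tail call, and the function name is hypothetical; the call itself is verbatim from the test.
declare void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i64(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64)
define void @sketch_vsoxseg3_mask(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
entry:
  ; same value in all three segment fields, indexed by a 64-bit offset vector, under %mask
  tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}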
; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
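The hunks above and below all follow one pattern: each extra segment register used to be filled by chaining off the first copy (vmv1r.v v11, v10; vmv1r.v v12, v10; ...), whereas with subregister liveness every vmv1r.v reads the original value in v8 directly, so the copies no longer depend on the v10 (or v12) def. In the seg2 tests that follow, the change is instead the disappearance of the `# kill: def $v8 ...` directives, which marked the whole v8_v9 register tuple as defined; with per-subregister live ranges such markers are presumably no longer required. As a reference point, here is a minimal sketch of the IR these CHECK lines are generated from, assuming the usual autogenerated form of this test file in which the same %val is passed for every segment operand (the declaration and body are reconstructed for illustration, not quoted from the patch):

declare void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i64(<vscale x 1 x half>, <vscale x 1 x half>, ptr, <vscale x 1 x i64>, i64)

define void @test_vsoxseg2_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
entry:
  ; Hypothetical body: both segment operands are %val, which is why the
  ; generated code only ever needs to copy v8 into the register tuple.
  tail call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i64(<vscale x 1 x half> %val, <vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
  ret void
}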
define void @test_vsoxseg2_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsoxseg2_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
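The masked nxv1f32 cases below follow the same shape; for completeness, a corresponding sketch for the first masked test, again assuming the standard generated form of these tests (the `.mask` intrinsic takes the mask operand before the VL operand; reconstructed, not quoted from the patch):

declare void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i64(<vscale x 1 x float>, <vscale x 1 x float>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)

define void @test_vsoxseg2_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
entry:
  ; Hypothetical body: the mask arrives in v0, so only %val and %index need
  ; to be shuffled into place, matching the two vmv1r.v copies in the hunks.
  tail call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i64(<vscale x 1 x float> %val, <vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}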
define void @test_vsoxseg2_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsoxseg2_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsoxseg2_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsoxseg2_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
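; Note on the pattern above: with subregister liveness enabled, the expansion of
; the segment-store tuple copies reads every vmv1r.v directly from the original
; source v8 rather than chaining each copy off the previous one (v10), so the
; copies no longer form a serial dependency chain.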
define void @test_vsoxseg2_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsoxseg2_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsoxseg2_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsoxseg2_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsoxseg2_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t
define void @test_vsoxseg2_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t
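; Note on the tests above: the "# kill:" comments previously emitted here marked
; an artificial def of the full segment-register tuple; with subregister liveness
; each lane of the tuple is tracked individually, so the super-register def (and
; its kill marker) is no longer generated.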
; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsoxseg3_mask_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsoxseg3_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
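; Note on the two ei32 tests above: the allocator now anchors the segment tuple
; at v8 itself and moves the index register aside with a single vmv4r, instead
; of copying the stored value into a fresh tuple at v16 as before.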
; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg4_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsoxseg4_mask_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv8f32_nxv8i16(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_mask_nxv8f32_nxv8i16(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_nxv8f32_nxv8i8(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_mask_nxv8f32_nxv8i8(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_nxv8f32_nxv8i64(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv8f32_nxv8i64(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t
define void @test_vsoxseg2_nxv8f32_nxv8i32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_mask_nxv8f32_nxv8i32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsoxseg2_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsoxseg2_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsoxseg2_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsoxseg2_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsoxseg2_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsoxseg2_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsoxseg2_nxv2f64_nxv2i64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsoxseg2_mask_nxv2f64_nxv2i64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t
define void @test_vsoxseg2_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsoxseg2_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsoxseg2_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t
define void @test_vsoxseg2_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsoxseg2_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsoxseg3_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsoxseg3_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg4_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg4_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg5_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
define void @test_vsoxseg5_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
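The deleted "# kill:" comments are the other recurring change. They are the printed form of KILL pseudo-instructions that, without subregister liveness, re-declared the incoming v8 as the first lane of the segment tuple (for example $v8_v9). With per-subregister liveness that bookkeeping appears to be unnecessary, so only the real moves survive; a sketch from the seg2 case above:

# Before: a liveness marker precedes the moves.
# kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
vmv1r.v v10, v9
vmv1r.v v9, v8
# After: the marker is gone; the moves themselves are unchanged.
vmv1r.v v10, v9
vmv1r.v v9, v8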
define void @test_vsoxseg2_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsoxseg2_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsoxseg2_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsoxseg2_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsoxseg2_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
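This seg3 case with an i64 index shows a second improvement, not just a changed copy source. Before, the allocator built the value tuple above the m2 index (three m1 moves into v12..v14) and stored from v12. Now it relocates the index once with an m2 move and completes the tuple in place at v8..v10, so the store reads v8 itself, trading one of the m1 moves for the index move:

# Before: three m1 moves lift the tuple over the index in v10m2.
vmv1r.v v12, v8
vmv1r.v v13, v12
vmv1r.v v14, v12
vsoxseg3ei64.v v12, (a0), v10
# After: one m2 move frees v10, two m1 moves finish the tuple at v8..v10.
vmv1r.v v9, v8
vmv2r.v v12, v10
vmv1r.v v10, v8
vsoxseg3ei64.v v8, (a0), v12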
define void @test_vsoxseg3_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
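For reference, the declarations these two calls resolve to can be read directly off the operand lists above; a sketch, with types copied from the call sites, assuming the usual autogenerated declarations elsewhere in the file:

declare void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i64(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, ptr, <vscale x 2 x i64>, i64)
declare void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i64(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64)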
; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsoxseg2_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsoxseg2_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsoxseg2_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsoxseg2_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t
define void @test_vsoxseg2_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsoxseg2_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vsoxseg3ei64.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vsoxseg3ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
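The same index-relocation strategy scales with LMUL: here the data is m2 and the i64 index occupies an m4 group, so a single vmv4r.v clears the index out of v12 and the value tuple completes at v8..v13 (sketch taken from the unmasked hunk above):

vmv2r.v v10, v8      # second m2 lane of the tuple
vmv4r.v v16, v12     # relocate the m4 index clear of v8..v13
vmv2r.v v12, v8      # third m2 lane
vsoxseg3ei64.v v8, (a0), v16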
; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsseg2_nxv16i16(<vscale x 16 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
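In this unit-stride vsseg file the moves already read v8 before the change, so the diff reduces to deleting the kill markers; the generated stores are otherwise identical:

# Codegen is unchanged once the marker (# kill: def $v8m4 ...) is dropped.
vmv4r.v v12, v8
vsseg2e16.v v8, (a0)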
define void @test_vsseg2_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg3_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsseg3_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsseg4_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg3_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
define void @test_vsseg3_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
define void @test_vsseg4_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsseg3_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsseg4_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsseg3_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsseg4_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsseg3_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsseg4_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
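The same change at LMUL=2: the tuple is built from even-numbered m2 groups (v8m2_v10m2, ...), the field copies become vmv2r.v, and the "# kill" marker for the $v8m2 super-register drops out in exactly the same way.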
define void @test_vsseg2_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsseg3_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsseg4_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg3_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
define void @test_vsseg3_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
define void @test_vsseg4_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv8i32(<vscale x 8 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
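At LMUL=4 only the two-field form appears (v8m4_v12m4, copied with vmv4r.v), since segment loads/stores require NFIELDS x LMUL <= 8; again the diff removes nothing but the kill comment.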
define void @test_vsseg2_mask_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg2_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg3_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
define void @test_vsseg3_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
define void @test_vsseg4_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsseg3_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsseg4_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv32i8(<vscale x 32 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg2_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg3_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsseg3_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsseg4_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsseg3_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsseg4_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsseg3_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsseg4_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv16f16(<vscale x 16 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
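The floating-point variants (f16/f32/f64) that follow show the identical diff shape as the integer ones, as expected: the kill markers depend only on the register tuple being formed, not on the element type.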
define void @test_vsseg2_mask_nxv16f16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg2_nxv4f64(<vscale x 4 x double> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0)
define void @test_vsseg2_mask_nxv4f64(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
define void @test_vsseg2_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0)
define void @test_vsseg2_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
define void @test_vsseg3_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsseg3_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsseg4_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsseg3_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsseg4_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsseg3_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsseg4_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsseg3_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsseg4_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsseg3_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsseg4_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv8f32(<vscale x 8 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg2_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0)
define void @test_vsseg2_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
define void @test_vsseg3_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsseg3_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsseg4_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsseg3_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsseg4_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsseg3_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsseg4_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsseg3_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsseg4_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv16i16(<vscale x 16 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg2_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsseg3_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsseg4_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg3_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
define void @test_vsseg3_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
define void @test_vsseg4_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0)
define void @test_vsseg2_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
define void @test_vsseg3_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsseg3_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsseg4_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsseg3_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsseg4_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsseg3_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsseg4_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg3_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
define void @test_vsseg3_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
define void @test_vsseg4_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsseg3_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsseg4_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsseg3_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsseg4_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg3_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
define void @test_vsseg3_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
define void @test_vsseg4_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv4i64(<vscale x 4 x i64> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0)
define void @test_vsseg2_mask_nxv4i64(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
define void @test_vsseg2_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsseg3_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsseg4_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg3_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsseg3_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsseg4_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg3_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsseg3_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsseg4_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv8i32(<vscale x 8 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg2_nxv32i8(<vscale x 32 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg2_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsseg3_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsseg4_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv2i64(<vscale x 2 x i64> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0)
define void @test_vsseg2_mask_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
define void @test_vsseg3_nxv2i64(<vscale x 2 x i64> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsseg3_mask_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsseg4_nxv2i64(<vscale x 2 x i64> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv16f16(<vscale x 16 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv16f16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg2_nxv4f64(<vscale x 4 x double> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0)
define void @test_vsseg2_mask_nxv4f64(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
define void @test_vsseg2_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0)
define void @test_vsseg2_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
define void @test_vsseg3_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsseg3_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsseg4_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsseg3_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsseg4_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsseg3_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsseg4_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsseg3_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsseg4_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv8f16(<vscale x 8 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv8f16(<vscale x 8 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsseg3_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsseg4_nxv8f16(<vscale x 8 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv8f32(<vscale x 8 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg2_nxv2f64(<vscale x 2 x double> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0)
define void @test_vsseg2_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
define void @test_vsseg3_nxv2f64(<vscale x 2 x double> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsseg3_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsseg4_nxv2f64(<vscale x 2 x double> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsseg3_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsseg4_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsseg3_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsseg4_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv4f32(<vscale x 4 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv4f32(<vscale x 4 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsseg3_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsseg4_nxv4f32(<vscale x 4 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv16i16(<vscale x 16 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv16i16(<vscale x 16 x i16> %val, ptr %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
define void @test_vssseg3_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
define void @test_vssseg4_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
define void @test_vssseg3_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
define void @test_vssseg4_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
define void @test_vssseg3_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
define void @test_vssseg4_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
define void @test_vssseg3_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
define void @test_vssseg4_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
define void @test_vssseg3_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
define void @test_vssseg4_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
define void @test_vssseg3_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
define void @test_vssseg4_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
define void @test_vssseg3_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
define void @test_vssseg4_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv8i32(<vscale x 8 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv8i32(<vscale x 8 x i32> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
define void @test_vssseg3_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
define void @test_vssseg4_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
define void @test_vssseg3_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
define void @test_vssseg4_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv32i8(<vscale x 32 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv32i8(<vscale x 32 x i8> %val, ptr %base, i32 %offset, <vscale x 32 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
define void @test_vssseg3_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
define void @test_vssseg4_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
define void @test_vssseg3_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
define void @test_vssseg4_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
define void @test_vssseg3_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
define void @test_vssseg4_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv16f16(<vscale x 16 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv16f16(<vscale x 16 x half> %val, ptr %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv4f64(<vscale x 4 x double> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4f64(<vscale x 4 x double> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
define void @test_vssseg3_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
define void @test_vssseg4_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
define void @test_vssseg3_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
define void @test_vssseg4_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
define void @test_vssseg3_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
define void @test_vssseg4_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
define void @test_vssseg3_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
define void @test_vssseg4_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
define void @test_vssseg3_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
define void @test_vssseg4_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv8f32(<vscale x 8 x float> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv8f32(<vscale x 8 x float> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
define void @test_vssseg3_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
define void @test_vssseg4_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
define void @test_vssseg3_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
define void @test_vssseg4_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
define void @test_vssseg3_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
define void @test_vssseg4_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
define void @test_vssseg3_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
define void @test_vssseg4_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv16i16(<vscale x 16 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv16i16(<vscale x 16 x i16> %val, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
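The pair of hunks above is representative of every test in this file: %val arrives in v8, the remaining segment operands are copies of it (the vmv4r into v12 here), and the removed "# kill:" marker is the KILL that previously widened the v8m4 def to the whole v8m4_v12m4 tuple, which is no longer needed once subregister lanes are tracked. As a minimal sketch of the kind of IR such a test wraps — the sketch_* function name, and the intrinsic name and mangling, are assumptions inferred from the visible argument lists, not copied from this patch:

declare void @llvm.riscv.vssseg2.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, ptr, i64, i64)

define void @sketch_vssseg2_nxv16i16(<vscale x 16 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
entry:
  ; passing %val for both segments is what forces codegen to copy v8 into v12
  tail call void @llvm.riscv.vssseg2.nxv16i16(<vscale x 16 x i16> %val,
                                              <vscale x 16 x i16> %val,
                                              ptr %base, i64 %offset, i64 %vl)
  ret void
}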
define void @test_vssseg2_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
define void @test_vssseg3_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
define void @test_vssseg4_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
define void @test_vssseg3_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
define void @test_vssseg4_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
define void @test_vssseg3_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
define void @test_vssseg4_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
define void @test_vssseg3_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
define void @test_vssseg4_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
define void @test_vssseg3_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
define void @test_vssseg4_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
define void @test_vssseg3_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
define void @test_vssseg4_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
define void @test_vssseg3_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
define void @test_vssseg4_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
define void @test_vssseg3_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
define void @test_vssseg4_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
define void @test_vssseg3_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
define void @test_vssseg4_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv4i64(<vscale x 4 x i64> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4i64(<vscale x 4 x i64> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
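For the masked variants, such as test_vssseg2_mask_nxv4i64 just above, the only difference on the IR side is a mask operand ahead of the vl; codegen leaves it in v0, which is what the v0.t suffix on the checked store reflects. Again a hedged sketch, with the intrinsic name and the sketch_* function assumed rather than taken from the patch:

declare void @llvm.riscv.vssseg2.mask.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, ptr, i64, <vscale x 4 x i1>, i64)

define void @sketch_vssseg2_mask_nxv4i64(<vscale x 4 x i64> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
entry:
  ; the mask lands in v0, hence the v0.t on vssseg2e64.v
  tail call void @llvm.riscv.vssseg2.mask.nxv4i64(<vscale x 4 x i64> %val,
                                                  <vscale x 4 x i64> %val,
                                                  ptr %base, i64 %offset,
                                                  <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}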
define void @test_vssseg2_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
define void @test_vssseg3_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
define void @test_vssseg4_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
define void @test_vssseg3_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
define void @test_vssseg4_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
define void @test_vssseg3_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
define void @test_vssseg4_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv8i32(<vscale x 8 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv8i32(<vscale x 8 x i32> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv32i8(<vscale x 32 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv32i8(<vscale x 32 x i8> %val, ptr %base, i64 %offset, <vscale x 32 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
define void @test_vssseg3_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
define void @test_vssseg4_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv2i64(<vscale x 2 x i64> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2i64(<vscale x 2 x i64> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2i64(<vscale x 2 x i64> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
define void @test_vssseg3_mask_nxv2i64(<vscale x 2 x i64> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
define void @test_vssseg4_nxv2i64(<vscale x 2 x i64> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv2i64(<vscale x 2 x i64> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv16f16(<vscale x 16 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv16f16(<vscale x 16 x half> %val, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv4f64(<vscale x 4 x double> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4f64(<vscale x 4 x double> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
define void @test_vssseg3_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
define void @test_vssseg4_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
define void @test_vssseg3_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
define void @test_vssseg4_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
define void @test_vssseg3_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
define void @test_vssseg4_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
define void @test_vssseg3_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
define void @test_vssseg4_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv8f16(<vscale x 8 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv8f16(<vscale x 8 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
define void @test_vssseg3_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
define void @test_vssseg4_nxv8f16(<vscale x 8 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv8f32(<vscale x 8 x float> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv8f32(<vscale x 8 x float> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv2f64(<vscale x 2 x double> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2f64(<vscale x 2 x double> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
define void @test_vssseg3_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
define void @test_vssseg4_nxv2f64(<vscale x 2 x double> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
define void @test_vssseg3_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
define void @test_vssseg4_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
define void @test_vssseg3_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
define void @test_vssseg4_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv4f32(<vscale x 4 x float> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv4f32(<vscale x 4 x float> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
define void @test_vssseg3_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
define void @test_vssseg4_nxv4f32(<vscale x 4 x float> %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsuxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsuxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsuxseg2_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsuxseg2_mask_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsuxseg2_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsuxseg2_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsuxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsuxseg2_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsuxseg2_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsuxseg2_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsuxseg2_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t
define void @test_vsuxseg2_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
define void @test_vsuxseg2_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
define void @test_vsuxseg2_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsuxseg3_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vsuxseg3ei16.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl)
define void @test_vsuxseg3_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vsuxseg3ei16.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
define void @test_vsuxseg3_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg4_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsuxseg4_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsuxseg2_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsuxseg2_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsuxseg2_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsuxseg2_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsuxseg2_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsuxseg2_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsuxseg2_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsuxseg2_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsuxseg2_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsuxseg2_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
define void @test_vsuxseg3_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsuxseg2_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsuxseg2_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsuxseg2_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsuxseg2_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
define void @test_vsuxseg3_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl)
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
define void @test_vsuxseg2_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
define void @test_vsuxseg2_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
define void @test_vsuxseg2_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
define void @test_vsuxseg3_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl)
define void @test_vsuxseg3_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
define void @test_vsuxseg3_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg4_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg4_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg5_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vsuxseg5ei32.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
define void @test_vsuxseg5_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vsuxseg5ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_mask_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_mask_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_mask_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
define void @test_vsuxseg2_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
define void @test_vsuxseg2_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
define void @test_vsuxseg3_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t
define void @test_vsuxseg2_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
define void @test_vsuxseg2_mask_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
define void @test_vsuxseg2_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsuxseg2_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsuxseg2_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsuxseg2_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsuxseg2_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsuxseg2_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
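For context, these CHECK blocks are autogenerated assertions (update_llc_test_checks.py style) for functions that pass the same %val operand to every field of an indexed segment store, which is why the backend materializes one vmv1r.v copy of v8 per segment field. A minimal sketch of one such function, assuming the separate-operand intrinsic form in use at the time of this commit (the exact intrinsic mangling here is illustrative, inferred from the test labels):

declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, ptr, <vscale x 2 x i8>, i32)

define void @test_vsuxseg3_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
entry:
  ; the same %val feeds all three segment fields of the store
  tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8(<vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
  ret void
}

With subregister liveness enabled, every copy that fills the v10-v17 segment group now reads v8 directly instead of chaining through v10, so the vmv1r.v instructions no longer form a serial dependency chain.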
define void @test_vsuxseg2_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsuxseg2_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsuxseg2_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsuxseg2_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsuxseg2_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsuxseg2_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsuxseg2_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsuxseg2_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
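The flag flipped in this commit can also be toggled on the llc command line, so hunks like these can be reproduced without rebuilding the compiler at a different default. A hedged sketch, where the test file path and -mattr string are illustrative rather than copied from this commit:

llc -mtriple=riscv32 -mattr=+v,+zvfh -riscv-enable-subreg-liveness=true \
    < llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll > with-liveness.s
llc -mtriple=riscv32 -mattr=+v,+zvfh -riscv-enable-subreg-liveness=false \
    < llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll > without-liveness.s
diff with-liveness.s without-liveness.s

Diffing the two outputs surfaces the same vmv1r.v and vmv2r.v source-operand changes shown in these hunks.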
define void @test_vsuxseg2_nxv16f16_nxv16i16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsuxseg2_mask_nxv16f16_nxv16i16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsuxseg2_nxv16f16_nxv16i8(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsuxseg2_mask_nxv16f16_nxv16i8(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsuxseg2_nxv16f16_nxv16i32(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv16f16_nxv16i32(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsuxseg2_nxv4f64_nxv4i16(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsuxseg2_mask_nxv4f64_nxv4i16(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsuxseg2_nxv4f64_nxv4i8(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsuxseg2_mask_nxv4f64_nxv4i8(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsuxseg2_nxv4f64_nxv4i32(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsuxseg2_mask_nxv4f64_nxv4i32(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsuxseg2_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsuxseg2_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsuxseg2_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsuxseg2_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsuxseg2_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsuxseg2_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsuxseg2_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsuxseg2_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsuxseg2_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsuxseg2_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsuxseg2_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsuxseg2_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsuxseg2_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsuxseg2_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsuxseg2_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
define void @test_vsuxseg3_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv8f32_nxv8i16(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_mask_nxv8f32_nxv8i16(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_nxv8f32_nxv8i8(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_mask_nxv8f32_nxv8i8(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_nxv8f32_nxv8i32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_mask_nxv8f32_nxv8i32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsuxseg2_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsuxseg2_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsuxseg2_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsuxseg2_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsuxseg2_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsuxseg2_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsuxseg2_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsuxseg2_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsuxseg2_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
define void @test_vsuxseg3_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsuxseg2_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsuxseg2_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsuxseg2_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsuxseg2_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsuxseg2_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsuxseg2_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsuxseg2_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsuxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsuxseg2_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsuxseg2_mask_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsuxseg2_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsuxseg2_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsuxseg2_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsuxseg2_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsuxseg2_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsuxseg2_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t
define void @test_vsuxseg2_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsuxseg2_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vsuxseg3ei64.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vsuxseg3ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t
define void @test_vsuxseg2_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
define void @test_vsuxseg2_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
define void @test_vsuxseg2_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsuxseg3_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vsuxseg3ei16.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vsuxseg3ei16.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
define void @test_vsuxseg3_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg4_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsuxseg4_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsuxseg2_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsuxseg2_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsuxseg2_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsuxseg2_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsuxseg2_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsuxseg2_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsuxseg2_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsuxseg2_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
ret void
}
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
ret void
}
; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsuxseg2_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsuxseg2_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsuxseg2_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsuxseg2_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t
define void @test_vsuxseg2_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsuxseg3_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsuxseg3_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg4_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsuxseg4_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
define void @test_vsuxseg2_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
define void @test_vsuxseg2_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t
define void @test_vsuxseg2_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
define void @test_vsuxseg3_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
define void @test_vsuxseg3_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg4_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg4_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg5_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vsuxseg5ei64.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
define void @test_vsuxseg5_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vsuxseg5ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
ret void
}
; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsuxseg2_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsuxseg2_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsuxseg2_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsuxseg2_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsuxseg2_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsuxseg2_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
define void @test_vsuxseg2_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
define void @test_vsuxseg2_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
define void @test_vsuxseg2_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t
define void @test_vsuxseg2_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
define void @test_vsuxseg3_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
define void @test_vsuxseg3_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
define void @test_vsuxseg3_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
define void @test_vsuxseg3_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg4_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg4_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg4_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg4_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg5_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg5_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg5_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vsuxseg5ei32.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
define void @test_vsuxseg5_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vsuxseg5ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
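(The masked variants have the same shape; the mask operand is materialized in v0, which is why the stores above carry the v0.t suffix. A minimal masked sketch follows, again with the declare and names inferred from the call site rather than copied from the file.)

declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64)

define void @vsuxseg5_mask_repro(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
entry:
  ; Only lanes enabled by %mask are stored; the backend places %mask in v0.
  tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
  ret void
}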
; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg6_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg6_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg7_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg7_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg8_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg8_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv4i64_nxv4i32(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsuxseg2_mask_nxv4i64_nxv4i32(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsuxseg2_nxv4i64_nxv4i8(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsuxseg2_mask_nxv4i64_nxv4i8(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsuxseg2_nxv4i64_nxv4i64(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsuxseg2_mask_nxv4i64_nxv4i64(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsuxseg2_nxv4i64_nxv4i16(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsuxseg2_mask_nxv4i64_nxv4i16(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsuxseg2_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
define void @test_vsuxseg2_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsuxseg2_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsuxseg2_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t
define void @test_vsuxseg2_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsuxseg2_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsuxseg3_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsuxseg3_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg4_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg4_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg5_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsuxseg5ei64.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
define void @test_vsuxseg5_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsuxseg5ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
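
The same rewrite at seg5 with an m4 index: previously all five segment values were copied into a fresh tuple at v16-v20, with the second through fifth copies reading the first copy; now the tuple is built at v8-v12 directly from v8 and the index group is moved out of the way with one vmv4r. A standalone sketch of this case, under the same assumed llc flags as above:

; Sketch distilled from the vsuxseg5 call in this test (assumed flags as above).
declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, ptr, <vscale x 4 x i64>, i64)

define void @vsuxseg5_repro(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
entry:
  ; Five copies of %val, indexed through the m4 index group.
  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
  ret void
}
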
; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsuxseg2_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsuxseg2_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsuxseg2_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsuxseg2_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsuxseg2_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsuxseg2_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
define void @test_vsuxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
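
The deleted # kill: lines in the vsuxseg2 hunks above are the other recurring change: without subregister liveness, re-declaring v8 as the first field of the v8_v9 tuple needed an explicit kill/def marker, which tracking the tuple's lanes individually appears to make redundant. A sketch for one of these cases; the vsuxseg2 declaration follows the naming convention of the seg3/seg5 calls shown in this file and is an assumption, not copied from the test.

; Sketch (assumed flags and assumed vsuxseg2 declaration, per the lead-in).
declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i64(<vscale x 1 x i8>, <vscale x 1 x i8>, ptr, <vscale x 1 x i64>, i64)

define void @vsuxseg2_repro(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
entry:
  ; Two copies of %val; the e8 data runs at EMUL=mf8 while the e64 index is m1.
  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i64(<vscale x 1 x i8> %val, <vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
  ret void
}
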
; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsuxseg2_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsuxseg2_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsuxseg2_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsuxseg2_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsuxseg2_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
define void @test_vsuxseg2_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
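
The vmv2r pattern recurs here at fractional data EMUL: the e8 data runs at mf4, but the e64 indices scale to EMUL = (64/8) x (1/4) = 2, i.e. m2, so the index pair is again relocated from v10 to v12 while the value tuple is built in place at v8-v10. A sketch, under the same assumed flags:

; Sketch distilled from the vsuxseg3 call in this test (assumed flags as above).
declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i64(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, ptr, <vscale x 2 x i64>, i64)

define void @vsuxseg3_mf4_repro(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
entry:
  ; mf4 data tuple stays at v8-v10; the m2 index group moves to v12 above.
  tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i64(<vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
  ret void
}
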
; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_mask_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_mask_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_nxv8i32_nxv8i64(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv8i32_nxv8i64(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t
define void @test_vsuxseg2_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_mask_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t
define void @test_vsuxseg2_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
define void @test_vsuxseg2_mask_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
define void @test_vsuxseg2_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsuxseg2_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsuxseg2_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsuxseg2_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsuxseg2_mask_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsuxseg2_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsuxseg2_mask_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsuxseg2_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsuxseg2_mask_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsuxseg2_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsuxseg2_mask_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv16f16_nxv16i16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsuxseg2_mask_nxv16f16_nxv16i16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsuxseg2_nxv16f16_nxv16i8(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsuxseg2_mask_nxv16f16_nxv16i8(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
define void @test_vsuxseg2_nxv16f16_nxv16i32(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv16f16_nxv16i32(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsuxseg2_nxv4f64_nxv4i32(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsuxseg2_mask_nxv4f64_nxv4i32(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsuxseg2_nxv4f64_nxv4i8(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsuxseg2_mask_nxv4f64_nxv4i8(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsuxseg2_nxv4f64_nxv4i64(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsuxseg2_mask_nxv4f64_nxv4i64(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsuxseg2_nxv4f64_nxv4i16(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsuxseg2_mask_nxv4f64_nxv4i16(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
define void @test_vsuxseg2_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsuxseg2_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsuxseg2_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsuxseg2_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsuxseg2_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsuxseg2_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsuxseg2_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
define void @test_vsuxseg2_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsuxseg2_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsuxseg2_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsuxseg2_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsuxseg2_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsuxseg2_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
define void @test_vsuxseg2_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
define void @test_vsuxseg2_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
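A note on the test hunks above and below: with subregister liveness enabled, the segment-store tests no longer chain their whole-register copies through the first copy (vmv1r.v v11, v10 after vmv1r.v v10, v8); every copy now reads the original value register v8 directly, and the synthetic "# kill:" super-register comments disappear. To compare the two behaviors locally, invocations along the following lines should work; this is a sketch, not the file's actual RUN lines (which are omitted from this excerpt), and the file name and exact -mattr string are assumptions:

  # Pre-patch behavior: copies chained through the first vmv*r.v copy.
  llc -mtriple=riscv64 -mattr=+v,+zfh -riscv-enable-subreg-liveness=false test.ll

  # Post-patch behavior (the new default): every copy reads v8 directly.
  llc -mtriple=riscv64 -mattr=+v,+zfh -riscv-enable-subreg-liveness=true test.ll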
define void @test_vsuxseg2_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsuxseg2_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsuxseg2_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsuxseg2_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsuxseg2_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t
define void @test_vsuxseg2_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsuxseg3_mask_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
define void @test_vsuxseg3_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg4_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsuxseg4_mask_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv8f32_nxv8i16(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_mask_nxv8f32_nxv8i16(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_nxv8f32_nxv8i8(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_mask_nxv8f32_nxv8i8(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_nxv8f32_nxv8i64(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv8f32_nxv8i64(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t
define void @test_vsuxseg2_nxv8f32_nxv8i32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_mask_nxv8f32_nxv8i32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
define void @test_vsuxseg2_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsuxseg2_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsuxseg2_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsuxseg2_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsuxseg2_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsuxseg2_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsuxseg2_nxv2f64_nxv2i64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
define void @test_vsuxseg2_mask_nxv2f64_nxv2i64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
define void @test_vsuxseg2_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsuxseg2_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsuxseg2_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t
define void @test_vsuxseg2_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsuxseg2_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsuxseg3_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
define void @test_vsuxseg3_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg4_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg4_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg5_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsuxseg5ei64.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
define void @test_vsuxseg5_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsuxseg5ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
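A note on the pattern in the hunks above: previously only the first vmv1r.v read the incoming v8, and every later copy in the tuple setup sourced that first copy's destination, serializing all of them behind it; now each copy reads v8 directly, so the moves are independent and the scheduler can interleave them freely. A minimal stand-alone reproducer for this shape, modeled on the call sites in this file (the function name is illustrative and the declaration is reconstructed from the visible calls; the test's own RUN lines carry the exact llc flags):

declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, ptr, <vscale x 4 x i16>, i64)

; Stores eight copies of %val as one segment store; codegen must place
; the same incoming value in all eight registers of the tuple.
define void @demo_vsuxseg8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
entry:
  tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16(<vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
  ret void
}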
define void @test_vsuxseg2_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsuxseg2_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsuxseg2_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsuxseg2_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
define void @test_vsuxseg2_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
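The ei64 seg3 cases above change shape rather than just copy sources: the segment tuple now starts at the incoming v8, so element 0 needs no copy at all; the m2 index operand is relocated once with vmv2r.v and only elements 1 and 2 of the tuple are filled with vmv1r.v. For reference, the declarations the visible calls require, reconstructed from the call sites:

declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i64(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, ptr, <vscale x 2 x i64>, i64)
declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i64(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64)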
; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsuxseg2_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsuxseg2_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsuxseg2_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsuxseg2_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t
define void @test_vsuxseg2_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
define void @test_vsuxseg2_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vsuxseg3ei64.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vsuxseg3ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
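The next hunk (a masked narrowing operation on an nxv32 input; its define is elided in this excerpt) shows the stack-frame effect directly. Unknown-size spill slots come in multiples of 8*vlenb, and the .cfi_escape comments spell out the totals:

; old: three 8*vlenb slots (v16 spill, v8 spill, vl8re64 temporary)
;      -> frame is sp + 16 + 24 * vlenb
; new: two 8*vlenb slots; the value loaded by vl8re64.v stays live in
;      v8..v15 instead of round-tripping through the stack
;      -> frame is sp + 16 + 16 * vlenb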
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: li a3, 24
-; CHECK-NEXT: mul a1, a1, a3
+; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a3, a1, 2
; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v25, v0, a3
+; CHECK-NEXT: vslidedown.vx v16, v0, a3
; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
; CHECK-NEXT: slli a3, a1, 3
; CHECK-NEXT: add a3, a0, a3
-; CHECK-NEXT: vl8re64.v v8, (a3)
+; CHECK-NEXT: vl8re64.v v24, (a3)
; CHECK-NEXT: slli a3, a1, 1
; CHECK-NEXT: sub a4, a2, a3
; CHECK-NEXT: sltu a5, a2, a4
; CHECK-NEXT: addi a6, a6, -1
; CHECK-NEXT: and a6, a6, a5
; CHECK-NEXT: srli a5, a1, 3
-; CHECK-NEXT: vl8re64.v v16, (a0)
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vslidedown.vx v0, v25, a5
+; CHECK-NEXT: vl8re64.v v8, (a0)
+; CHECK-NEXT: vslidedown.vx v0, v16, a5
; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma
-; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
+; CHECK-NEXT: vnsrl.wi v20, v24, 0, v0.t
; CHECK-NEXT: bltu a4, a1, .LBB17_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a1
; CHECK-NEXT: .LBB17_2:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v26, v1, a5
+; CHECK-NEXT: vslidedown.vx v2, v1, a5
; CHECK-NEXT: vsetvli zero, a4, e32, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t
; CHECK-NEXT: bltu a2, a3, .LBB17_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a0, a3, a0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v26
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vnsrl.wi v12, v24, 0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v2
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vnsrl.wi v28, v8, 0, v0.t
; CHECK-NEXT: bltu a2, a1, .LBB17_6
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v1
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vnsrl.wi v8, v24, 0, v0.t
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vnsrl.wi v24, v8, 0, v0.t
+; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 24
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_nxv32f16_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT: vfncvt.f.xu.w v12, v16, v0.t
+; CHECK-NEXT: vfncvt.f.xu.w v28, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB25_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB25_2:
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfncvt.f.xu.w v8, v16, v0.t
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: vfncvt.f.xu.w v24, v8, v0.t
+; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.uitofp.nxv32f16.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x half> %v
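In the vuitofp hunk just above the frame disappears entirely: the narrowed result is built in v24m8 while both halves of the source stay live in v8m8 and v16m8, and one whole-register vmv8r.v at the end replaces the old spill and reload of the first half. To compare the two sides locally, the option this patch toggles can be passed straight to llc (an invocation sketch: the -mattr string and file name are assumed, since the test's real RUN lines are not part of this hunk):

; llc -mtriple=riscv64 -mattr=+v,+zvfh -riscv-enable-subreg-liveness=true  vuitofp.ll
; llc -mtriple=riscv64 -mattr=+v,+zvfh -riscv-enable-subreg-liveness=false vuitofp.ll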
; CHECK-NEXT: vlseg2e16.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(<vscale x 16 x i16> undef,<vscale x 16 x i16> undef, ptr %base, i64 0)
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i64 %offset, i64 0)
; CHECK-NEXT: vlseg2e16ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a1)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i64 0)
; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a1)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask, i64 0, i64 1)
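The removed "# kill:" lines here and earlier are comments, not instructions: the AsmPrinter prints KILL pseudo-instructions that way, and KILLs were only inserted to patch up whole-register liveness when a value was defined or used as part of a wider register tuple. With lanes tracked individually the pseudos are never created, so the comment vanishes with no change to the emitted instructions. Decoding one removed line (a reading of the operand list, not text from the patch):

; # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
;   v8m4 is re-defined as a standalone value, and the live range of the
;   v4m4_v8m4 tuple it was extracted from ends here.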
define void @test_vsseg2_nxv16i16(<vscale x 16 x i16> %val, ptr %base) {
; CHECK-LABEL: test_vsseg2_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vsseg2_mask_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vssseg2_nxv16i16(<vscale x 16 x i16> %val, ptr %base, i64 %offset) {
; CHECK-LABEL: test_vssseg2_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv16i16(<vscale x 16 x i16> %val, ptr %base, i64 %offset, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vssseg2_mask_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vsoxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index) {
; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma
define void @test_vsoxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma
define void @test_vsuxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index) {
; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma
define void @test_vsuxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma
; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; RV32-NEXT: vle8.v v12, (a0)
; RV32-NEXT: vmv1r.v v14, v9
-; RV32-NEXT: # kill: def $v8 killed $v8 def $v8m2
; RV32-NEXT: vrgather.vv v10, v8, v12
; RV32-NEXT: vid.v v8
; RV32-NEXT: vrsub.vi v8, v8, 15
; RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; RV64-NEXT: vle8.v v12, (a0)
; RV64-NEXT: vmv1r.v v14, v9
-; RV64-NEXT: # kill: def $v8 killed $v8 def $v8m2
; RV64-NEXT: vrgather.vv v10, v8, v12
; RV64-NEXT: vid.v v8
; RV64-NEXT: vrsub.vi v8, v8, 15
; CHECK-LABEL: v8i16_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8m2
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vid.v v14
; CHECK-NEXT: vrsub.vi v16, v14, 15
; RV32-NEXT: addi a0, a0, %lo(.LCPI15_0)
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; RV32-NEXT: vle16.v v16, (a0)
-; RV32-NEXT: vmv2r.v v20, v10
-; RV32-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m4
-; RV32-NEXT: vrgather.vv v12, v8, v16
-; RV32-NEXT: vid.v v8
-; RV32-NEXT: vrsub.vi v8, v8, 15
+; RV32-NEXT: vle16.v v20, (a0)
+; RV32-NEXT: vmv2r.v v16, v10
+; RV32-NEXT: vmv2r.v v12, v8
+; RV32-NEXT: vrgather.vv v8, v12, v20
+; RV32-NEXT: vid.v v12
+; RV32-NEXT: vrsub.vi v12, v12, 15
; RV32-NEXT: lui a0, 16
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; RV32-NEXT: vmv.s.x v0, a0
; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; RV32-NEXT: vrgather.vv v12, v20, v8, v0.t
-; RV32-NEXT: vmv.v.v v8, v12
+; RV32-NEXT: vrgather.vv v8, v16, v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: v16i16_2:
; RV64-NEXT: addi a0, a0, %lo(.LCPI15_0)
; RV64-NEXT: li a1, 32
; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; RV64-NEXT: vle16.v v16, (a0)
-; RV64-NEXT: vmv2r.v v20, v10
-; RV64-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m4
-; RV64-NEXT: vrgather.vv v12, v8, v16
-; RV64-NEXT: vid.v v8
-; RV64-NEXT: vrsub.vi v8, v8, 15
+; RV64-NEXT: vle16.v v20, (a0)
+; RV64-NEXT: vmv2r.v v16, v10
+; RV64-NEXT: vmv2r.v v12, v8
+; RV64-NEXT: vrgather.vv v8, v12, v20
+; RV64-NEXT: vid.v v12
+; RV64-NEXT: vrsub.vi v12, v12, 15
; RV64-NEXT: lui a0, 16
; RV64-NEXT: addiw a0, a0, -1
; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; RV64-NEXT: vmv.s.x v0, a0
; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; RV64-NEXT: vrgather.vv v12, v20, v8, v0.t
-; RV64-NEXT: vmv.v.v v8, v12
+; RV64-NEXT: vrgather.vv v8, v16, v12, v0.t
; RV64-NEXT: ret
%v32i16 = shufflevector <16 x i16> %a, <16 x i16> %b, <32 x i32> <i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
ret <32 x i16> %v32i16
; CHECK-LABEL: v4i32_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8m2
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vid.v v14
; CHECK-NEXT: vrsub.vi v16, v14, 7
; CHECK-LABEL: v8i32_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m4
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vid.v v20
; CHECK-NEXT: vrsub.vi v24, v20, 15
; CHECK-LABEL: v2i64_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v8, 1
+; CHECK-NEXT: vslidedown.vi v10, v8, 1
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vslideup.vi v12, v8, 1
+; CHECK-NEXT: vslideup.vi v10, v8, 1
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
+; CHECK-NEXT: vslidedown.vi v8, v9, 1
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vslideup.vi v10, v9, 1
+; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v12, 2
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vslideup.vi v8, v10, 2
; CHECK-NEXT: ret
%v4i64 = shufflevector <2 x i64> %a, <2 x i64> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
ret <4 x i64> %v4i64
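The element-reverse shuffles gain the same way: the result is now assembled directly in v8/v10, so the trailing whole-register vmv2r.v into v8 disappears. Reconstructed from the visible label and body (parameter names assumed), the source of the first case is simply:

define <4 x i64> @v2i64_2(<2 x i64> %a, <2 x i64> %b) {
  %v4i64 = shufflevector <2 x i64> %a, <2 x i64> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  ret <4 x i64> %v4i64
}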
; RV32-LABEL: v4i64_2:
; RV32: # %bb.0:
; RV32-NEXT: vmv2r.v v16, v10
-; RV32-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m4
; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vid.v v20
-; RV32-NEXT: vrsub.vi v21, v20, 7
+; RV32-NEXT: vid.v v18
+; RV32-NEXT: vrsub.vi v19, v18, 7
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; RV32-NEXT: vrgatherei16.vv v12, v8, v21
+; RV32-NEXT: vrgatherei16.vv v12, v8, v19
; RV32-NEXT: li a0, 15
; RV32-NEXT: vmv.s.x v0, a0
; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; RV32-NEXT: vrsub.vi v8, v20, 3
+; RV32-NEXT: vrsub.vi v8, v18, 3
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vrgatherei16.vv v12, v16, v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV64-LABEL: v4i64_2:
; RV64: # %bb.0:
; RV64-NEXT: vmv2r.v v16, v10
-; RV64-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m4
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vid.v v20
; RV64-NEXT: vrsub.vi v24, v20, 7
; CHECK-LABEL: v8f16_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8m2
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vid.v v14
; CHECK-NEXT: vrsub.vi v16, v14, 15
define <32 x half> @v16f16_2(<16 x half> %a) {
; CHECK-LABEL: v16f16_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m4
; CHECK-NEXT: lui a0, %hi(.LCPI35_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI35_0)
; CHECK-NEXT: li a1, 32
; CHECK-LABEL: v4f32_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8m2
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vid.v v14
; CHECK-NEXT: vrsub.vi v16, v14, 7
; CHECK-LABEL: v8f32_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m4
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vid.v v20
; CHECK-NEXT: vrsub.vi v24, v20, 15
; CHECK-LABEL: v2f64_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v8, 1
+; CHECK-NEXT: vslidedown.vi v10, v8, 1
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vslideup.vi v12, v8, 1
+; CHECK-NEXT: vslideup.vi v10, v8, 1
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
+; CHECK-NEXT: vslidedown.vi v8, v9, 1
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vslideup.vi v10, v9, 1
+; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v12, 2
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vslideup.vi v8, v10, 2
; CHECK-NEXT: ret
%v4f64 = shufflevector <2 x double> %a, <2 x double> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
ret <4 x double> %v4f64
; RV32-LABEL: v4f64_2:
; RV32: # %bb.0:
; RV32-NEXT: vmv2r.v v16, v10
-; RV32-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m4
; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vid.v v20
-; RV32-NEXT: vrsub.vi v21, v20, 7
+; RV32-NEXT: vid.v v18
+; RV32-NEXT: vrsub.vi v19, v18, 7
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; RV32-NEXT: vrgatherei16.vv v12, v8, v21
+; RV32-NEXT: vrgatherei16.vv v12, v8, v19
; RV32-NEXT: li a0, 15
; RV32-NEXT: vmv.s.x v0, a0
; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; RV32-NEXT: vrsub.vi v8, v20, 3
+; RV32-NEXT: vrsub.vi v8, v18, 3
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vrgatherei16.vv v12, v16, v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV64-LABEL: v4f64_2:
; RV64: # %bb.0:
; RV64-NEXT: vmv2r.v v16, v10
-; RV64-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m4
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vid.v v20
; RV64-NEXT: vrsub.vi v24, v20, 7